author     Dennis Jeon <dennis.jeon@broadcom.corp-partner.google.com>  2023-05-25 21:50:16 +0900
committer  dennis jeon <dennis.jeon@broadcom.corp-partner.google.com>  2023-05-26 11:49:25 +0000
commit     abbaed96855d1bbd0a23d932f40c6b731d258ad5 (patch)
tree       115f2086e2ed8b3997499da9a9aeecbbb9456ff8
parent     3e05450fa7dee4e183ab20388282f45bb249b727 (diff)
download   bcm4398-abbaed96855d1bbd0a23d932f40c6b731d258ad5.tar.gz
DHD release for bcm4398, 2023.5.25 Version: 103.10.224

Bug: 284432925
Test: SVT test cycle
Change-Id: I8c819d0c84976a3e49a03d8d90b99d7a139ac0e0
Signed-off-by: Dennis Jeon <dennis.jeon@broadcom.corp-partner.google.com>
-rw-r--r--  Kbuild                           |   4
-rw-r--r--  bcmcapext.c                      |  11
-rw-r--r--  bcmstdlib_s.c                    |   2
-rw-r--r--  bcmutils.c                       | 134
-rw-r--r--  bcmwifi_channels.c               |   2
-rw-r--r--  dhd.h                            |  48
-rw-r--r--  dhd_common.c                     | 213
-rw-r--r--  dhd_custom_cis.c                 |  11
-rw-r--r--  dhd_linux.c                      | 295
-rw-r--r--  dhd_linux.h                      |   4
-rw-r--r--  dhd_msgbuf.c                     | 101
-rw-r--r--  dhd_pcie.c                       | 649
-rw-r--r--  dhd_pcie_linux.c                 |  24
-rw-r--r--  dhd_proto.h                      |   4
-rw-r--r--  dhd_rtt.c                        |   1
-rw-r--r--  dhd_rtt.h                        |   3
-rw-r--r--  include/802.11.h                 |   5
-rw-r--r--  include/bcmdefs.h                |  13
-rw-r--r--  include/bcmerror.h               |   7
-rw-r--r--  include/bcmevent.h               |  11
-rw-r--r--  include/bcmutils.h               |   3
-rw-r--r--  include/dnglioctl.h              | 104
-rw-r--r--  include/epivers.h                |  16
-rw-r--r--  include/event_log_payload.h      |  32
-rw-r--r--  include/event_log_set.h          |  14
-rw-r--r--  include/event_log_tag.h          |  17
-rw-r--r--  include/hndoobr.h                |   5
-rw-r--r--  include/linux_pkt.h              |  12
-rw-r--r--  include/nan.h                    |  11
-rw-r--r--  include/phy_event_log_payload.h  | 210
-rw-r--r--  include/wlioctl.h                | 437
-rw-r--r--  include/wlioctl_defs.h           |  19
-rw-r--r--  linux_pkt.c                      |  14
-rw-r--r--  wb_regon_coordinator.c           |  11
-rw-r--r--  wl_android.c                     |  14
-rw-r--r--  wl_cfg80211.c                    | 551
-rw-r--r--  wl_cfg80211.h                    |   2
-rw-r--r--  wl_cfgp2p.c                      | 312
-rw-r--r--  wl_cfgscan.c                     | 172
-rw-r--r--  wl_cfgscan.h                     |   5
-rw-r--r--  wl_cfgvendor.c                   |  45
-rw-r--r--  wl_cfgvif.c                      | 301
-rw-r--r--  wldev_common.c                   |  28
43 files changed, 2964 insertions(+), 913 deletions(-)
diff --git a/Kbuild b/Kbuild
index 56dd447..02aebac 100644
--- a/Kbuild
+++ b/Kbuild
@@ -353,6 +353,8 @@ ifneq ($(CONFIG_SOC_GOOGLE),)
DHDCFLAGS += -DDHD_RECOVER_TIMEOUT
# PCIE CPL TIMEOUT WAR
DHDCFLAGS += -DDHD_TREAT_D3ACKTO_AS_LINKDWN
+ # Skip coredump for certain health check traps
+ DHDCFLAGS += -DDHD_SKIP_COREDUMP_ON_HC
endif
endif
@@ -907,7 +909,7 @@ DHDCFLAGS += -DDEBUG_DNGL_INIT_FAIL
DHDCFLAGS += -DDHD_CAP_CUSTOMER="\"hw2 \""
ifneq ($(CONFIG_SOC_GOOGLE),)
# The flag will be enabled only on customer platform
- DHDCFLAGS += -DCUSTOMER_HW2_DEBUG
+ DHDCFLAGS += -DCUSTOMER_HW2 -DCUSTOMER_HW2_DEBUG
DHDCFLAGS += -DDHD_SET_PCIE_DMA_MASK_FOR_GS101
DHDCFLAGS += -DCUSTOM_CONTROL_LOGTRACE=1
DHDCFLAGS += -DDHD_CAP_PLATFORM="\"exynos \""
diff --git a/bcmcapext.c b/bcmcapext.c
index 61ad1ce..94762da 100644
--- a/bcmcapext.c
+++ b/bcmcapext.c
@@ -48,7 +48,6 @@
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmtlv.h>
-#include <dnglioctl.h>
#include <wlioctl.h>
#include <bcmstdlib_s.h>
@@ -126,6 +125,7 @@ static const capext_bitpos_to_string_map_t capext_pktlat_subfeature_map[] = {
* Insert new entries in the array below in sorted order of output string to be printed
*/
static const capext_bitpos_to_string_map_t capext_rte_features_subfeature_map[] = {
+ CAPEXT_SUBFEATURE_MAP(CAPEXT_RTE_FEATURE_BITPOS_CST, "cst"),
CAPEXT_SUBFEATURE_MAP(CAPEXT_RTE_FEATURE_BITPOS_ECOUNTERS, "ecounters"),
CAPEXT_SUBFEATURE_MAP(CAPEXT_RTE_FEATURE_BITPOS_ETD, "etd_info"),
CAPEXT_SUBFEATURE_MAP(CAPEXT_RTE_FEATURE_BITPOS_EVENT_LOG, "event_log"),
@@ -217,23 +217,23 @@ static const capext_bitpos_to_string_map_t capext_fbt_subfeature_map[] = {
* Insert new entries in the array below in sorted order of output string to be printed
*/
static const capext_bitpos_to_string_map_t capext_wl_features_subfeature_map[] = {
+ CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_11AZ, "11az"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_160MHZ_SUPPORT, "160"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_6G, "6g"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_802_11d, "802.11d"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_802_11h, "802.11h"),
- CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_AMPDU, "ampdu"),
+ CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_AMPDU, "ampdu"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_AMSDU, "amsdu"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_ANQPO, "anqpo"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_AP, "ap"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_APF, "apf"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_ARB, "arb"),
-
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_ARPOE, "arpoe"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_AVOID_BSSID, "avoid-bssid"),
+
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_BCMDCS, "bcmdcs"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_BCNPROT, "bcnprot"),
-
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_BCNTRIM, "bcntrim"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_BDO, "bdo"),
CAPEXT_SUBFEATURE_MAP(WLC_CAPEXT_FEATURE_BITPOS_BGDFS, "bgdfs"),
@@ -519,9 +519,10 @@ bcmcapext_parse_output(void *bufptr, uint16 maxlen, char *outbuf, uint16 outbuf
return BCME_BADARG;
}
- if (ltoh16(capext->version) != CAPEXT_INFO_VERSION) {
+ if (ltoh16(capext->version) != CAPEXT_INFO_VERSION_1) {
return BCME_VERSION;
}
+
payload_len = ltoh16(capext->datalen);
if (payload_len > maxlen) {
diff --git a/bcmstdlib_s.c b/bcmstdlib_s.c
index 2b4b00a..a8ea9e8 100644
--- a/bcmstdlib_s.c
+++ b/bcmstdlib_s.c
@@ -287,7 +287,7 @@ strlcat_s(char *dest, const char *src, size_t size)
if (n != 0) {
/* copy relevant chars (until end of src buf or given size is reached) */
bytes_to_copy = MIN(slen - (size_t)(s - src), n - 1);
- (void)memcpy(d, s, bytes_to_copy);
+ (void)memcpy_s(d, bytes_to_copy, s, bytes_to_copy);
d += bytes_to_copy;
}
}
diff --git a/bcmutils.c b/bcmutils.c
index 1490ba3..8766a3b 100644
--- a/bcmutils.c
+++ b/bcmutils.c
@@ -1644,33 +1644,66 @@ pktlist_dump(pktlist_info_t *pktlist, char *buf)
{
char *obuf = buf;
uint16 i;
+ int olen = 0;
- if (buf != NULL)
- buf += sprintf(buf, "Packet list dump:\n");
- else
+ if (buf != NULL) {
+ olen = snprintf(buf, bufsz, "Packet list dump:\n");
+ if (olen > 0 && (uint)olen < bufsz) {
+ buf += olen;
+ bufsz -= (uint)olen;
+ } else {
+ return obuf;
+ }
+ } else {
printf("Packet list dump:\n");
+ }
for (i = 0; i < (pktlist->count); i++) {
- if (buf != NULL)
- buf += sprintf(buf, "Pkt_addr: 0x%p Line: %d File: %s\t",
+ if (buf != NULL) {
+ olen = snprintf(buf, bufsz, "Pkt_addr: 0x%p Line: %d File: %s\t",
OSL_OBFUSCATE_BUF(pktlist->list[i].pkt), pktlist->list[i].line,
pktlist->list[i].file);
- else
+ if (olen > 0 && (uint)olen < bufsz) {
+ buf += olen;
+ bufsz -= (uint)olen;
+ } else {
+ return obuf;
+ }
+ } else {
printf("Pkt_addr: 0x%p Line: %d File: %s\t",
OSL_OBFUSCATE_BUF(pktlist->list[i].pkt),
pktlist->list[i].line, pktlist->list[i].file);
+ }
/* #ifdef NOTDEF Remove this ifdef to print pkttag and pktdata */
if (buf != NULL) {
if (PKTTAG(pktlist->list[i].pkt)) {
/* Print pkttag */
- buf += sprintf(buf, "Pkttag(in hex): ");
- buf += bcm_format_hex(buf, PKTTAG(pktlist->list[i].pkt),
- OSL_PKTTAG_SZ);
+ olen = snprintf(buf, bufsz, "Pkttag(in hex): ");
+ if (olen > 0 && (uint)olen < bufsz) {
+ buf += olen;
+ bufsz -= (uint)olen;
+ } else {
+ return obuf;
+ }
+ if (bufsz >= OSL_PKTTAG_SZ) {
+ buf += bcm_format_hex(buf, PKTTAG(pktlist->list[i].pkt),
+ OSL_PKTTAG_SZ);
+ bufsz -= OSL_PKTTAG_SZ;
+ }
+ }
+ olen = snprintf(buf, bufsz, "Pktdata(in hex): ");
+ if (olen > 0 && olen < bufsz) {
+ buf += olen;
+ bufsz -= olen;
+ } else {
+ return obuf;
+ }
+ if (bufsz >= PKTLEN(OSH_NULL, pktlist->list[i].pkt)) {
+ buf += bcm_format_hex(buf, PKTDATA(OSH_NULL, pktlist->list[i].pkt),
+ PKTLEN(OSH_NULL, pktlist->list[i].pkt));
+ bufsz -= PKTLEN(OSH_NULL, pktlist->list[i].pkt);
}
- buf += sprintf(buf, "Pktdata(in hex): ");
- buf += bcm_format_hex(buf, PKTDATA(OSH_NULL, pktlist->list[i].pkt),
- PKTLEN(OSH_NULL, pktlist->list[i].pkt));
} else {
void *pkt = pktlist->list[i].pkt, *npkt;
@@ -1709,10 +1742,17 @@ pktlist_dump(pktlist_info_t *pktlist, char *buf)
}
/* #endif NOTDEF */
- if (buf != NULL)
- buf += sprintf(buf, "\n");
- else
+ if (buf != NULL) {
+ olen = snprintf(buf, bufsz, "\n");
+ if (olen > 0 && (uint)olen < bufsz) {
+ buf += olen;
+ bufsz -= (uint)olen;
+ } else {
+ return obuf;
+ }
+ } else {
printf("\n");
+ }
}
return obuf;
}
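The pktlist_dump() hunk above replaces open-ended sprintf() calls with snprintf() plus explicit remaining-size bookkeeping, bailing out with the original buffer pointer once the output no longer fits. The stand-alone sketch below shows the same append pattern in isolation; the helper name append_fmt is illustrative only and is not part of the driver.

#include <stdarg.h>
#include <stdio.h>

/* Append formatted text to *buf, advancing the cursor and shrinking the
 * remaining size, mirroring the snprintf/bufsz bookkeeping in the hunk above.
 * Returns 0 on success, -1 once the output is truncated or on error. */
static int append_fmt(char **buf, unsigned int *bufsz, const char *fmt, ...)
{
    va_list ap;
    int olen;

    va_start(ap, fmt);
    olen = vsnprintf(*buf, *bufsz, fmt, ap);
    va_end(ap);

    if (olen <= 0 || (unsigned int)olen >= *bufsz)
        return -1;              /* stop appending; buffer is full */

    *buf += olen;
    *bufsz -= (unsigned int)olen;
    return 0;
}

int main(void)
{
    char out[64];
    char *p = out;
    unsigned int left = sizeof(out);

    if (append_fmt(&p, &left, "Packet list dump:\n") == 0)
        (void)append_fmt(&p, &left, "Pkt_addr: %p Line: %d\n", (void *)out, 42);
    fputs(out, stdout);
    return 0;
}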
@@ -4203,6 +4243,70 @@ hndcrc16(
return crc;
}
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the ANSI/IBM polynomial:
+ *
+ * x^16 + x^15 +x^2 + 1 (or 0x18005 in hex notation)
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ */
+static const uint16 crc16_ansi_table[256] = {
+ 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
+ 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
+ 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
+ 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
+ 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
+ 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
+ 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
+ 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
+ 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
+ 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
+ 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
+ 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
+ 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
+ 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
+ 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
+ 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
+ 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
+ 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
+ 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
+ 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
+ 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
+ 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
+ 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
+ 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
+ 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
+ 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
+ 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
+ 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
+ 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
+ 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
+ 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
+ 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
+};
+
+#define CRC_ANSI_INNER_LOOP(n, c, x) \
+ (c) = ((c) >> 8) ^ crc##n##_ansi_table[((c) ^ (x)) & 0xff]
+
+uint16
+hndcrc16ansi(
+ const uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint16 crc /* either CRC16_INIT_VALUE or previous return value */
+)
+{
+ while (nbytes-- > 0)
+ CRC_ANSI_INNER_LOOP(16, crc, *pdata++);
+ return crc;
+}
+
static const uint32 BCMPOST_TRAP_RODATA(crc32_table)[256] = {
0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
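The hndcrc16ansi() routine added above is a table-driven reflected CRC-16 over the ANSI/IBM polynomial 0x8005 (0xA001 in reflected form), with the caller supplying the running CRC so discontiguous blocks can be chained. As a stand-alone cross-check, the same polynomial can be computed bit by bit; the 0x0000 initial value and the 0xBB3D check result for "123456789" below follow the common CRC-16/ARC convention and are assumptions here, since the driver's CRC16_INIT_VALUE and CRC16_GOOD_VALUE are defined elsewhere.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise equivalent of the table-driven loop above:
 * reflected CRC-16 with polynomial 0x8005 (0xA001 reflected). */
static uint16_t crc16_ansi_bitwise(const uint8_t *p, size_t n, uint16_t crc)
{
    while (n--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : (crc >> 1);
    }
    return crc;
}

int main(void)
{
    const char *msg = "123456789";
    /* With an all-zero initial value this is CRC-16/ARC; "123456789" -> 0xBB3D. */
    printf("0x%04X\n", crc16_ansi_bitwise((const uint8_t *)msg, strlen(msg), 0x0000));
    return 0;
}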
diff --git a/bcmwifi_channels.c b/bcmwifi_channels.c
index ec16152..616106b 100644
--- a/bcmwifi_channels.c
+++ b/bcmwifi_channels.c
@@ -1099,7 +1099,7 @@ wf_chanspec_iter_init(wf_chanspec_iter_t *iter, chanspec_band_t band, chanspec_b
* If the validation fails then the iterator will return INVCHANSPEC as the current
* chanspec, and wf_chanspec_iter_next() will return FALSE.
*/
- memset(iter, 0, sizeof(*iter));
+ bzero(iter, sizeof(*iter));
iter->state = WF_ITER_DONE;
iter->chanspec = INVCHANSPEC;
diff --git a/dhd.h b/dhd.h
index 7b235bb..1b8e280 100644
--- a/dhd.h
+++ b/dhd.h
@@ -2071,6 +2071,13 @@ typedef struct dhd_pub {
#ifdef DHD_SDTC_ETB_DUMP
bool etb_dump_inited;
#endif /* DHD_SDTC_ETB_DUMP */
+#if defined(DHD_TIMESYNC)
+ void *ts_lock;
+#endif /* DHD_TIMESYNC */
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ uint *sssr_saqm_buf_before;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ uint *sssr_saqm_buf_after;
} dhd_pub_t;
#if defined(__linux__)
@@ -3951,6 +3958,14 @@ extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags);
#define DHD_LINUX_GENERAL_UNLOCK(dhdp, flags) do {BCM_REFERENCE(flags);} while (0)
#endif
+#if defined(DHD_TIMESYNC)
+#define DHD_TIMESYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define DHD_TIMESYNC_UNLOCK(lock, flags) osl_spin_unlock(lock, (flags))
+#else
+#define DHD_TIMESYNC_LOCK(lock, flags) do {BCM_REFERENCE(flags);} while (0)
+#define DHD_TIMESYNC_UNLOCK(lock, flags) do {BCM_REFERENCE(flags);} while (0)
+#endif /* DHD_TIMESYNC */
+
#define DHD_BUS_INB_DW_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
#define DHD_BUS_INB_DW_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
@@ -4202,6 +4217,32 @@ extern void dhd_schedule_macdbg_dump(dhd_pub_t *dhdp);
#define SSSR_DUMP_MODE_SSSR 0 /* dump both *before* and *after* files */
#define SSSR_DUMP_MODE_FIS 1 /* dump *after* files only */
+typedef struct sssr_header {
+ uint32 magic; /* should be 53535352 = 'SSSR' */
+ uint16 header_version; /* version number of this SSSR header */
+ uint16 sr_version; /* version of SR version. This is to differentiate changes in SR ASM. */
+ /*
+	 * Header length from the next field 'data_len' and upto the start of
+ * binary_data[]. This is 20 bytes for version 0
+ */
+ uint32 header_len;
+ uint32 data_len; /* number of bytes in binary_data[] */
+ uint16 chipid; /* chipid */
+ uint16 chiprev; /* chiprev */
+ /*
+ * For D11 MAC/sAQM cores, the coreid, coreunit & WAR_signature in the dump belong
+ * to respective cores. For the DIG SSSR dump these fields are extracted from the ARM core.
+ */
+ uint16 coreid;
+ uint16 coreunit;
+
+ uint32 war_reg; /* Value of WAR register */
+ uint32 flags; /* For future use */
+
+ uint8 binary_data[];
+} sssr_header_t;
+#define SSSR_HEADER_MAGIC 0x53535352u /* SSSR */
+
extern int dhd_sssr_mempool_init(dhd_pub_t *dhd);
extern void dhd_sssr_mempool_deinit(dhd_pub_t *dhd);
extern int dhd_sssr_dump_init(dhd_pub_t *dhd, bool fis_dump);
@@ -4215,6 +4256,12 @@ extern uint dhd_sssr_dig_buf_addr(dhd_pub_t *dhdp);
extern uint dhd_sssr_mac_buf_size(dhd_pub_t *dhdp, uint8 core_idx);
extern uint dhd_sssr_mac_xmtaddress(dhd_pub_t *dhdp, uint8 core_idx);
extern uint dhd_sssr_mac_xmtdata(dhd_pub_t *dhdp, uint8 core_idx);
+extern int dhd_sssr_mac_war_reg(dhd_pub_t *dhdp, uint8 core_idx, uint32 *war_reg);
+extern int dhd_sssr_arm_war_reg(dhd_pub_t *dhdp, uint32 *war_reg);
+extern int dhd_sssr_saqm_war_reg(dhd_pub_t *dhdp, uint32 *war_reg);
+extern int dhd_sssr_sr_asm_version(dhd_pub_t *dhdp, uint16 *sr_asm_version);
+extern uint dhd_sssr_saqm_buf_size(dhd_pub_t *dhdp);
+extern uint dhd_sssr_saqm_buf_addr(dhd_pub_t *dhdp);
#define DHD_SSSR_MEMPOOL_INIT(dhdp) dhd_sssr_mempool_init(dhdp)
#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) dhd_sssr_mempool_deinit(dhdp)
@@ -4256,6 +4303,7 @@ extern int dhd_coredump_mempool_init(dhd_pub_t *dhd);
extern void dhd_coredump_mempool_deinit(dhd_pub_t *dhd);
#define DHD_COREDUMP_MEMPOOL_INIT(dhdp) dhd_coredump_mempool_init(dhdp)
#define DHD_COREDUMP_MEMPOOL_DEINIT(dhdp) dhd_coredump_mempool_deinit(dhdp)
+#define DHD_COREDUMP_IGNORE_TRAP_SIG "host_wake_asserted_for_too_long"
#else
#define DHD_COREDUMP_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0)
#define DHD_COREDUMP_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0)
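For reference, the new sssr_header_t layout can be mirrored in a stand-alone program to sanity-check its field sizes: the 20-byte header_len documented for version 0 is just the span between data_len and binary_data[]. The struct name and <stdint.h> types below are illustrative stand-ins, not the driver's own typedefs.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SSSR_HEADER_MAGIC 0x53535352u   /* 'SSSR' */

/* Stand-alone mirror of the sssr_header_t layout added to dhd.h. */
typedef struct {
    uint32_t magic;            /* SSSR_HEADER_MAGIC */
    uint16_t header_version;
    uint16_t sr_version;
    uint32_t header_len;       /* bytes from data_len up to binary_data[] */
    uint32_t data_len;         /* bytes in binary_data[] */
    uint16_t chipid;
    uint16_t chiprev;
    uint16_t coreid;
    uint16_t coreunit;
    uint32_t war_reg;
    uint32_t flags;
    uint8_t  binary_data[];
} sssr_header_example_t;

int main(void)
{
    /* header_len counts everything between data_len and binary_data[]:
     * 4 + 2 + 2 + 2 + 2 + 4 + 4 = 20 bytes for header version 0. */
    size_t hdr_len = offsetof(sssr_header_example_t, binary_data) -
                     offsetof(sssr_header_example_t, data_len);
    printf("header_len = %zu, magic = 0x%08X\n", hdr_len, SSSR_HEADER_MAGIC);
    return 0;
}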
diff --git a/dhd_common.c b/dhd_common.c
index 3687e11..2159183 100644
--- a/dhd_common.c
+++ b/dhd_common.c
@@ -1064,6 +1064,105 @@ dhd_dump_sssr_reg_info_v3(dhd_pub_t *dhd)
}
static void
+dhd_dump_sssr_reg_info_v5(dhd_pub_t *dhd)
+{
+ sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
+ sssr_reg_info_v5_t *sssr_reg_info = (sssr_reg_info_v5_t *)&sssr_reg_info_cmn->rev5;
+ int i;
+ uint8 num_d11cores = dhd_d11_slices_num_get(dhd);
+
+ DHD_PRINT(("pmu_regs\n"));
+ DHD_PRINT(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x "
+ "macresreqtimer=0x%x macresreqtimer1=0x%x macresreqtimer2=0x%x"
+ "pmu_min_res_mask=0x%x pmu_max_res_mask=0x%x sssr_max_res_mask=0x%x\n",
+ sssr_reg_info->pmu_regs.base_regs.pmuintmask0,
+ sssr_reg_info->pmu_regs.base_regs.pmuintmask1,
+ sssr_reg_info->pmu_regs.base_regs.resreqtimer,
+ sssr_reg_info->pmu_regs.base_regs.macresreqtimer,
+ sssr_reg_info->pmu_regs.base_regs.macresreqtimer1,
+ sssr_reg_info->pmu_regs.base_regs.macresreqtimer2,
+ sssr_reg_info->pmu_regs.base_regs.pmu_min_res_mask,
+ sssr_reg_info->pmu_regs.base_regs.pmu_max_res_mask,
+ sssr_reg_info->pmu_regs.base_regs.sssr_max_res_mask));
+
+ DHD_PRINT(("chipcommon_regs\n"));
+ DHD_PRINT(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n",
+ sssr_reg_info->chipcommon_regs.base_regs.intmask,
+ sssr_reg_info->chipcommon_regs.base_regs.powerctrl,
+ sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus,
+ sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask));
+
+ DHD_PRINT(("arm_regs\n"));
+ DHD_PRINT(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x"
+ " extrsrcreq=0x%x war_reg=0x%x\n",
+ sssr_reg_info->arm_regs.base_regs.clockcontrolstatus,
+ sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val,
+ sssr_reg_info->arm_regs.oobr_regs.extrsrcreq,
+ sssr_reg_info->arm_regs.war_reg));
+
+ DHD_PRINT(("pcie_regs\n"));
+ DHD_PRINT(("ltrstate=0x%x clockcontrolstatus=0x%x "
+ "clockcontrolstatus_val=0x%x extrsrcreq=0x%x\n",
+ sssr_reg_info->pcie_regs.base_regs.ltrstate,
+ sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus,
+ sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val,
+ sssr_reg_info->pcie_regs.oobr_regs.extrsrcreq));
+
+ for (i = 0; i < num_d11cores; i++) {
+ DHD_PRINT(("mac_regs core[%d]\n", i));
+ DHD_PRINT(("xmtaddress=0x%x xmtdata=0x%x clockcontrolstatus=0x%x "
+ "clockcontrolstatus_val=0x%x extrsrcreq=0x%x war_reg=0x%x\n",
+ sssr_reg_info->mac_regs[i].base_regs.xmtaddress,
+ sssr_reg_info->mac_regs[i].base_regs.xmtdata,
+ sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus,
+ sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val,
+ sssr_reg_info->mac_regs[i].oobr_regs.extrsrcreq,
+ sssr_reg_info->mac_regs[i].war_reg));
+ DHD_PRINT(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size));
+ }
+
+ DHD_PRINT(("saqm_sssr_info base_regs\n"));
+ DHD_PRINT(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x "
+ "extrsrcreq=0x%x war_reg=0x%x\n",
+ sssr_reg_info->saqm_sssr_info.base_regs.clockcontrolstatus,
+ sssr_reg_info->saqm_sssr_info.base_regs.clockcontrolstatus_val,
+ sssr_reg_info->saqm_sssr_info.oobr_regs.extrsrcreq,
+ sssr_reg_info->saqm_sssr_info.war_reg));
+ DHD_PRINT(("saqm_sssr_info saqm_sssr_addr=0x%x saqm_sssr_size=0x%x\n",
+ sssr_reg_info->saqm_sssr_info.saqm_sssr_addr,
+ sssr_reg_info->saqm_sssr_info.saqm_sssr_size));
+ DHD_PRINT(("saqm_sssr_info config_regs\n"));
+ DHD_PRINT(("digsr_srcontrol1_addr=0x%x digsr_srcontrol1_clrbit_val=0x%x"
+ " digsr_srcontrol2_addr=0x%x digsr_srcontrol2_setbit_val=0x%x"
+ " pmuchip_ctl_addr_reg=0x%x, pmuchip_ctl_val=0x%x"
+ " pmuchip_ctl_data_reg=0x%x pmuchip_ctl_setbit_val=0x%x\n",
+ sssr_reg_info->saqm_sssr_info.sssr_config_regs.digsr_srcontrol1_addr,
+ sssr_reg_info->saqm_sssr_info.sssr_config_regs.digsr_srcontrol1_clrbit_val,
+ sssr_reg_info->saqm_sssr_info.sssr_config_regs.digsr_srcontrol2_addr,
+ sssr_reg_info->saqm_sssr_info.sssr_config_regs.digsr_srcontrol2_setbit_val,
+ sssr_reg_info->saqm_sssr_info.sssr_config_regs.pmuchip_ctl_addr_reg,
+ sssr_reg_info->saqm_sssr_info.sssr_config_regs.pmuchip_ctl_val,
+ sssr_reg_info->saqm_sssr_info.sssr_config_regs.pmuchip_ctl_data_reg,
+ sssr_reg_info->saqm_sssr_info.sssr_config_regs.pmuchip_ctl_setbit_val));
+
+ DHD_PRINT(("dig_mem_info\n"));
+ DHD_PRINT(("dig_sssr_addr=0x%x dig_sssr_size=0x%x\n",
+ sssr_reg_info->dig_mem_info.dig_sssr_addr,
+ sssr_reg_info->dig_mem_info.dig_sssr_size));
+
+ DHD_PRINT(("fis_mem_info\n"));
+ DHD_PRINT(("fis_addr=0x%x fis_size=0x%x fis_enab=0x%x\n",
+ sssr_reg_info->fis_mem_info.fis_addr,
+ sssr_reg_info->fis_mem_info.fis_size,
+ sssr_reg_info->fis_enab));
+
+ DHD_PRINT(("sssr_all_mem_info\n"));
+ DHD_PRINT(("sysmem_sssr_addr=0x%x sysmem_sssr_size=0x%x\n",
+ sssr_reg_info->sssr_all_mem_info.sysmem_sssr_addr,
+ sssr_reg_info->sssr_all_mem_info.sysmem_sssr_size));
+}
+
+static void
dhd_dump_sssr_reg_info_v4(dhd_pub_t *dhd)
{
sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
@@ -1228,6 +1327,9 @@ dhd_dump_sssr_reg_info(dhd_pub_t *dhd)
DHD_PRINT(("************** SSSR REG INFO start version:%d ****************\n",
sssr_reg_info->version));
switch (sssr_reg_info->version) {
+ case SSSR_REG_INFO_VER_5 :
+ dhd_dump_sssr_reg_info_v5(dhd);
+ break;
case SSSR_REG_INFO_VER_4 :
dhd_dump_sssr_reg_info_v4(dhd);
break;
@@ -1270,6 +1372,11 @@ dhd_get_sssr_reg_info(dhd_pub_t *dhd)
/* Write sssr reg info to output file */
switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ ret = dhd_write_file_and_check(filepath_sssr,
+ (char*)(&dhd->sssr_reg_info->rev5),
+ sizeof(sssr_reg_info_v5_t));
+ break;
case SSSR_REG_INFO_VER_4 :
ret = dhd_write_file_and_check(filepath_sssr,
(char*)(&dhd->sssr_reg_info->rev4),
@@ -1317,6 +1424,9 @@ dhd_get_sssr_bufsize(dhd_pub_t *dhd)
num_d11cores = dhd_d11_slices_num_get(dhd);
switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ sssr_bufsize += dhd->sssr_reg_info->rev5.sssr_all_mem_info.sysmem_sssr_size;
+ break;
case SSSR_REG_INFO_VER_4 :
sssr_bufsize += dhd->sssr_reg_info->rev4.sssr_all_mem_info.sysmem_sssr_size;
break;
@@ -1377,7 +1487,7 @@ int
dhd_sssr_dump_init(dhd_pub_t *dhd, bool fis_dump)
{
int i;
- uint32 sssr_bufsize;
+ uint32 sssr_bufsize = 0;
uint32 mempool_used = 0;
uint8 num_d11cores = 0;
bool alloc_sssr = FALSE;
@@ -1411,6 +1521,11 @@ dhd_sssr_dump_init(dhd_pub_t *dhd, bool fis_dump)
ret = dhd_read_file(filepath_sssr, (char*)(&dhd->sssr_reg_info->rev0),
sizeof(sssr_reg_info_v0_t));
switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ ret = dhd_read_file(filepath_sssr,
+ (char*)(&dhd->sssr_reg_info->rev5),
+ sizeof(sssr_reg_info_v5_t));
+ break;
case SSSR_REG_INFO_VER_4 :
ret = dhd_read_file(filepath_sssr,
(char*)(&dhd->sssr_reg_info->rev4),
@@ -1451,6 +1566,15 @@ dhd_sssr_dump_init(dhd_pub_t *dhd, bool fis_dump)
num_d11cores = dhd_d11_slices_num_get(dhd);
/* Validate structure version and length */
switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ if (dhd->sssr_reg_info->rev5.length != sizeof(sssr_reg_info_v5_t)) {
+ DHD_ERROR(("%s: dhd->sssr_reg_info->rev5.length (%d : %d)"
+ "mismatch on rev5\n", __FUNCTION__,
+ (int)dhd->sssr_reg_info->rev5.length,
+ (int)sizeof(sssr_reg_info_v5_t)));
+ return BCME_ERROR;
+ }
+ break;
case SSSR_REG_INFO_VER_4 :
if (dhd->sssr_reg_info->rev4.length != sizeof(sssr_reg_info_v4_t)) {
DHD_ERROR(("%s: dhd->sssr_reg_info->rev4.length (%d : %d)"
@@ -1522,12 +1646,24 @@ dhd_sssr_dump_init(dhd_pub_t *dhd, bool fis_dump)
#endif /* DHD_SSSR_DUMP_BEFORE_SR */
dhd->sssr_dig_buf_after = NULL;
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ dhd->sssr_saqm_buf_before = NULL;
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ dhd->sssr_saqm_buf_after = NULL;
+
/* Allocate memory */
for (i = 0; i < num_d11cores; i++) {
alloc_sssr = FALSE;
sr_size = 0;
switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ if (dhd->sssr_reg_info->rev5.mac_regs[i].sr_size) {
+ alloc_sssr = TRUE;
+ sr_size = dhd->sssr_reg_info->rev5.mac_regs[i].sr_size;
+ sr_size += sizeof(sssr_header_t);
+ }
+ break;
case SSSR_REG_INFO_VER_4 :
if (dhd->sssr_reg_info->rev4.mac_regs[i].sr_size) {
alloc_sssr = TRUE;
@@ -1574,6 +1710,16 @@ dhd_sssr_dump_init(dhd_pub_t *dhd, bool fis_dump)
alloc_sssr = FALSE;
sr_size = 0;
switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ if ((dhd->sssr_reg_info->rev5.length >
+ OFFSETOF(sssr_reg_info_v5_t, sssr_all_mem_info)) &&
+ dhd->sssr_reg_info->rev5.sssr_all_mem_info.sysmem_sssr_addr) {
+ alloc_sssr = TRUE;
+ sr_size =
+ dhd->sssr_reg_info->rev5.sssr_all_mem_info.sysmem_sssr_size;
+ sr_size += sizeof(sssr_header_t);
+ }
+ break;
case SSSR_REG_INFO_VER_4 :
/* for v4 need to use sssr_all_mem_info instead of dig_mem_info */
if ((dhd->sssr_reg_info->rev4.length >
@@ -1624,11 +1770,46 @@ dhd_sssr_dump_init(dhd_pub_t *dhd, bool fis_dump)
/* DIG dump before suspend is not applicable. */
dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
mempool_used += sr_size;
- ASSERT(mempool_used <= DHD_SSSR_MEMPOOL_SIZE);
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+ }
+
+ /* Allocate dump memory for SAQM */
+ alloc_sssr = FALSE;
+ sr_size = 0;
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5:
+ if (dhd->sssr_reg_info->rev5.saqm_sssr_info.saqm_sssr_size > 0) {
+ alloc_sssr = TRUE;
+ sr_size =
+ dhd->sssr_reg_info->rev5.saqm_sssr_info.saqm_sssr_size;
+ }
+ break;
+ case SSSR_REG_INFO_VER_4:
+ case SSSR_REG_INFO_VER_3:
+ case SSSR_REG_INFO_VER_2:
+ case SSSR_REG_INFO_VER_1:
+ case SSSR_REG_INFO_VER_0:
+ break;
+ default:
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ if (alloc_sssr) {
+ dhd->sssr_saqm_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
+ mempool_used += sr_size;
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ /* DIG dump before suspend is not applicable. */
+ dhd->sssr_saqm_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
+ mempool_used += sr_size;
#endif /* DHD_SSSR_DUMP_BEFORE_SR */
}
dhd->sssr_inited = TRUE;
+ DHD_PRINT(("%s mempool_used:%d size:%d\n",
+ __FUNCTION__, mempool_used, DHD_SSSR_MEMPOOL_SIZE));
+ ASSERT(mempool_used <= DHD_SSSR_MEMPOOL_SIZE);
return BCME_OK;
@@ -1748,8 +1929,10 @@ dhd_coredump_t dhd_coredump_types[] = {
{DHD_COREDUMP_TYPE_SSSRDUMP_DIG_AFTER, 0, NULL},
{DHD_COREDUMP_TYPE_SOCRAMDUMP, 0, NULL},
#ifdef DHD_SDTC_ETB_DUMP
- {DHD_COREDUMP_TYPE_SDTC_ETB_DUMP, 0, NULL}
+ {DHD_COREDUMP_TYPE_SDTC_ETB_DUMP, 0, NULL},
#endif /* DHD_SDTC_ETB_DUMP */
+ {DHD_COREDUMP_TYPE_SSSRDUMP_SAQM_BEFORE, 0, NULL},
+ {DHD_COREDUMP_TYPE_SSSRDUMP_SAQM_AFTER, 0, NULL}
};
static int dhd_append_sssr_tlv(uint8 *buf_dst, int type_idx, int buf_remain)
@@ -5937,8 +6120,15 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
/* Because WLC_E_ESCAN_RESULT event log are being print too many.
* So, DHD_EVENT() changes to be used DHD_TRACE() in HW4 platform.
*/
- DHD_TRACE(("MACEVENT: %s %d, MAC %s, status %d \n",
- event_name, event_type, eabuf, (int)status));
+ if ((status == WLC_E_STATUS_SUCCESS) || (status == WLC_E_STATUS_ABORT)) {
+ /* print critical scan events via DHD_EVENT */
+ DHD_EVENT(("MACEVENT: %s %d, status %d sync-id %u\n",
+ event_name, event_type, (int)status,
+ dtoh16(escan_result->sync_id)));
+ } else {
+ DHD_TRACE(("MACEVENT: %s %d, MAC %s, status %d \n",
+ event_name, event_type, eabuf, (int)status));
+ }
#else
#if !defined(NDIS)
DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d sync-id %u\n",
@@ -6148,6 +6338,19 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
case WLC_E_OWE_INFO:
DHD_EVENT(("MACEVENT: %s, MAC %s type:%d\n", event_name, eabuf, reason));
break;
+#ifdef WL_MLO
+ case WLC_E_MLO_LINK_INFO:
+ {
+ wl_mlo_link_info_event_v1_t *info =
+ (wl_mlo_link_info_event_v1_t *)event_data;
+ if (info) {
+ DHD_EVENT(("MACEVENT: %s, opcode:%d\n", event_name, info->opcode));
+ } else {
+ DHD_EVENT(("MACEVENT: %s\n", event_name));
+ }
+ break;
+ }
+#endif /* WL_MLO */
default:
DHD_INFO(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
event_name, event_type, eabuf, (int)status, (int)reason,
diff --git a/dhd_custom_cis.c b/dhd_custom_cis.c
index e3c77e6..55c3abf 100644
--- a/dhd_custom_cis.c
+++ b/dhd_custom_cis.c
@@ -100,6 +100,7 @@ typedef struct chip_rev_table {
chip_rev_table_t chip_revs[] = {
{0x4383, {"a0", "a1", "a3", "\0", "\0", "\0", "\0", "\0", "\0", "\0"}},
{0x4398, {"a0", "b0", "c0", "d0", "\0", "\0", "\0", "\0", "\0", "\0"}},
+ {0x4390, {"a0", "\0", "\0", "\0", "\0", "\0", "\0", "\0", "\0", "\0"}},
/* 4389 - not yet supported for now */
{0x4389, {"\0", "\0", "\0", "\0", "\0", "\0", "\0", "\0", "\0", "\0"}},
};
@@ -144,6 +145,12 @@ vid_info_t vid_naming_table_4398[] = {
{ 3, { 0x57, 0x99, }, { "_USI_G5SN_9957_V17" } },
};
+vid_info_t vid_naming_table_4390[] = {
+ /* 4390a0 */
+ { 3, { 0x10, 0x63, }, { "_USI_G6BB_6310_V10" } },
+ { 3, { 0x11, 0x63, }, { "_USI_G6BB_6311_V11" } },
+};
+
#ifdef DHD_USE_CISINFO
#if defined(BCM4335_CHIP)
vid_info_t vid_info[] = {
@@ -899,6 +906,10 @@ dhd_get_fw_nvram_names(dhd_pub_t *dhdp, uint chipid, uint chiprev,
vid_info = vid_naming_table_4398;
vid_info_sz = ARRAYSIZE(vid_naming_table_4398);
break;
+ case BCM4390_CHIP_GRPID:
+ vid_info = vid_naming_table_4390;
+ vid_info_sz = ARRAYSIZE(vid_naming_table_4390);
+ break;
default:
DHD_ERROR(("%s: unrecognized chip id 0x%x !\n",
__FUNCTION__, chipid));
diff --git a/dhd_linux.c b/dhd_linux.c
index d31f1af..72958a6 100644
--- a/dhd_linux.c
+++ b/dhd_linux.c
@@ -1986,23 +1986,25 @@ dhd_update_mlo_peer_info(void *pub, int ifidx, const uint8 *ea, dhd_mlo_peer_inf
if (sta) {
if (peer_info) {
if (sta->peer_info) {
- ret = memset_s(sta->peer_info, sizeof(sta->peer_info), 0,
+ ret = memset_s(sta->peer_info, sizeof(dhd_mlo_peer_info_t), 0,
sizeof(dhd_mlo_peer_info_t));
if (ret) {
DHD_ERROR(("%s: sta peer info clear failed\n",
__FUNCTION__));
goto exit;
}
- DHD_PRINT(("%s: peer info entry is present already for:"
+ DHD_INFO(("%s: peer info entry is present already for:"
"" MACDBG "\n", __FUNCTION__, MAC2STRDBG(sta->ea.octet)));
} else {
sta->peer_info = MALLOCZ(((dhd_pub_t *)pub)->osh,
sizeof(dhd_mlo_peer_info_t));
if (sta->peer_info == NULL) {
+ DHD_ERROR(("%s: sta peer info allocation failed\n",
+ __FUNCTION__));
goto exit;
}
}
- ret = memcpy_s(sta->peer_info, sizeof(sta->peer_info),
+ ret = memcpy_s(sta->peer_info, sizeof(dhd_mlo_peer_info_t),
peer_info, sizeof(dhd_mlo_peer_info_t));
if (ret) {
DHD_ERROR(("%s: sta peer info copying failed\n", __FUNCTION__));
@@ -3405,8 +3407,8 @@ dhd_ndev_upd_features_handler(void *handle, void *event_info, u8 event)
return;
}
/* wait for 20msec and retry rtnl_lock */
- DHD_PRINT(("%s: rtnl_lock held, wait\n", __FUNCTION__));
- OSL_SLEEP(20);
+ DHD_PRINT(("%s: rtnl_lock held mostly by dhd_open, wait\n", __FUNCTION__));
+ OSL_SLEEP(50);
}
DHD_PRINT(("%s: netdev_update_features\n", __FUNCTION__));
netdev_update_features(net);
@@ -7178,9 +7180,6 @@ dhd_open(struct net_device *net)
}
#endif /* TOE */
- /* enable network offload features like CSO RCO */
- dhd_enable_net_offloads(dhd, net);
-
#ifdef DHD_LB
#if defined(DHD_LB_RXP)
__skb_queue_head_init(&dhd->rx_pend_queue);
@@ -7280,6 +7279,9 @@ dhd_open(struct net_device *net)
dhd_dbgfs_init(&dhd->pub);
#endif
+ /* enable network offload features like CSO RCO */
+ dhd_enable_net_offloads(dhd, net);
+
exit:
mutex_unlock(&dhd->pub.ndev_op_sync);
@@ -9224,16 +9226,6 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
#endif /* CHECK_TRAP_ROT */
#if defined(WBRC)
-#if defined(BCMDHD_MODULAR)
- if (!wbrc_init()) {
-#ifdef WBRC_HW_QUIRKS
- uint chipid = dhd_get_chipid(bus);
- wl2wbrc_wlan_init(&dhd->pub, chipid);
-#else
- wl2wbrc_wlan_init(&dhd->pub);
-#endif /* WBRC_HW_QUIRKS */
- }
-#endif /* BCMDHD_MODULAR */
dhd->pub.chip_bighammer_count = 0;
#endif /* WBRC */
@@ -9362,6 +9354,7 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
#ifdef DHD_TIMESYNC
+ dhd->pub.ts_lock = osl_spin_lock_init(dhd->pub.osh);
/* attach the timesync module */
if (dhd_timesync_attach(&dhd->pub) != 0) {
DHD_ERROR(("dhd_timesync_attach failed\n"));
@@ -11516,14 +11509,6 @@ dhd_optimised_preinit_ioctls(dhd_pub_t * dhd)
dhd->wlc_ver_major = wlc_ver.wlc_ver_major;
dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor;
}
-#if defined(BOARD_HIKEY) || defined (BOARD_STB)
- /* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
- if (strstr(fw_version, "WLTEST") != NULL) {
- DHD_PRINT(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
- __FUNCTION__));
- op_mode = DHD_FLAG_MFG_MODE;
- }
-#endif /* BOARD_HIKEY || BOARD_STB */
/* get a capabilities from firmware */
ret = dhd_get_fw_capabilities(dhd);
@@ -14828,12 +14813,6 @@ void dhd_detach(dhd_pub_t *dhdp)
#endif /* DHD_WLFC_THREAD */
#endif /* PROP_TXSTATUS */
-#ifdef DHD_TIMESYNC
- if (dhd->dhd_state & DHD_ATTACH_TIMESYNC_ATTACH_DONE) {
- dhd_timesync_detach(dhdp);
- }
-#endif /* DHD_TIMESYNC */
-
if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
#if defined(OEM_ANDROID) || !defined(BCMSDIO)
@@ -15188,6 +15167,13 @@ void dhd_detach(dhd_pub_t *dhdp)
dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
}
#endif /* DHD_ERPOM */
+#ifdef DHD_TIMESYNC
+ if (dhd->dhd_state & DHD_ATTACH_TIMESYNC_ATTACH_DONE) {
+ dhd_timesync_detach(dhdp);
+ }
+ osl_spin_lock_deinit(dhd->pub.osh, dhd->pub.ts_lock);
+#endif /* DHD_TIMESYNC */
+
#if defined(OEM_ANDROID)
dhd_cancel_work_sync(&dhd->dhd_hang_process_work);
@@ -20179,6 +20165,20 @@ char map_path[PATH_MAX] = VENDOR_PATH CONFIG_BCMDHD_MAP_PATH;
extern int dhd_collect_coredump(dhd_pub_t *dhdp, dhd_dump_t *dump);
#endif /* DHD_COREDUMP */
+#ifdef DHD_SSSR_COREDUMP
+static bool
+dhd_is_coredump_reqd(char *trapstr, uint str_len)
+{
+#ifdef DHD_SKIP_COREDUMP_ON_HC
+ if (trapstr && str_len &&
+ strnstr(trapstr, DHD_COREDUMP_IGNORE_TRAP_SIG, str_len)) {
+ return FALSE;
+ }
+#endif /* DHD_SKIP_COREDUMP_ON_HC */
+ return TRUE;
+}
+#endif /* DHD_SSSR_COREDUMP */
+
static void
dhd_mem_dump(void *handle, void *event_info, u8 event)
{
@@ -20200,6 +20200,7 @@ dhd_mem_dump(void *handle, void *event_info, u8 event)
char pc_fn[DHD_FUNC_STR_LEN] = "\0";
char lr_fn[DHD_FUNC_STR_LEN] = "\0";
trap_t *tr;
+ bool collect_coredump = FALSE;
#endif /* DHD_COREDUMP */
uint32 memdump_type;
bool set_linkdwn_cto = FALSE;
@@ -20259,6 +20260,9 @@ dhd_mem_dump(void *handle, void *event_info, u8 event)
dhdp->dongle_fis_enab = FALSE;
switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ dhdp->dongle_fis_enab = dhdp->sssr_reg_info->rev5.fis_enab;
+ break;
case SSSR_REG_INFO_VER_4 :
dhdp->dongle_fis_enab = dhdp->sssr_reg_info->rev4.fis_enab;
break;
@@ -20386,19 +20390,27 @@ dhd_mem_dump(void *handle, void *event_info, u8 event)
DHD_PRINT(("%s: dump reason: %s\n", __FUNCTION__, dhdp->memdump_str));
#ifdef DHD_SSSR_COREDUMP
- ret = dhd_collect_coredump(dhdp, dump);
- if (ret == BCME_ERROR) {
- DHD_ERROR(("%s: dhd_collect_coredump() failed.\n", __FUNCTION__));
- goto exit;
- } else if (ret == BCME_UNSUPPORTED) {
- DHD_LOG_MEM(("%s: Unable to collect SSSR dumps. Skip it.\n",
+ if (dhd_is_coredump_reqd(dhdp->memdump_str,
+ strnlen(dhdp->memdump_str, DHD_MEMDUMP_LONGSTR_LEN))) {
+ ret = dhd_collect_coredump(dhdp, dump);
+ if (ret == BCME_ERROR) {
+ DHD_ERROR(("%s: dhd_collect_coredump() failed.\n",
+ __FUNCTION__));
+ goto exit;
+ } else if (ret == BCME_UNSUPPORTED) {
+ DHD_LOG_MEM(("%s: Unable to collect SSSR dumps. Skip it.\n",
+ __FUNCTION__));
+ }
+ collect_coredump = TRUE;
+ } else {
+ DHD_PRINT(("%s: coredump not collected, dhd_is_coredump_reqd returns false\n",
__FUNCTION__));
}
#endif /* DHD_SSSR_COREDUMP */
if (memdump_type == DUMP_TYPE_BY_SYSDUMP) {
DHD_LOG_MEM(("%s: coredump is not supported for BY_SYSDUMP/non trap cases\n",
__FUNCTION__));
- } else {
+ } else if (collect_coredump) {
DHD_ERROR(("%s: writing SoC_RAM dump\n", __FUNCTION__));
if (wifi_platform_set_coredump(dhd->adapter, dump->buf,
dump->bufsize, dhdp->memdump_str)) {
@@ -20605,6 +20617,14 @@ dhd_sssr_dig_buf_size(dhd_pub_t *dhdp)
/* SSSR register information structure v0 and v1 shares most except dig_mem */
switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5:
+ if ((dhdp->sssr_reg_info->rev5.length >
+ OFFSETOF(sssr_reg_info_v5_t, sssr_all_mem_info)) &&
+ dhdp->sssr_reg_info->rev5.sssr_all_mem_info.sysmem_sssr_size) {
+ dig_buf_size =
+ dhdp->sssr_reg_info->rev5.sssr_all_mem_info.sysmem_sssr_size;
+ }
+ break;
case SSSR_REG_INFO_VER_4:
/* for v4 need to use sssr_all_mem_info instead of dig_mem_info */
if ((dhdp->sssr_reg_info->rev4.length >
@@ -20652,6 +20672,14 @@ dhd_sssr_dig_buf_addr(dhd_pub_t *dhdp)
/* SSSR register information structure v0 and v1 shares most except dig_mem */
switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ if ((dhdp->sssr_reg_info->rev5.length >
+ OFFSETOF(sssr_reg_info_v5_t, sssr_all_mem_info)) &&
+ dhdp->sssr_reg_info->rev5.sssr_all_mem_info.sysmem_sssr_size) {
+ dig_buf_addr =
+ dhdp->sssr_reg_info->rev5.sssr_all_mem_info.sysmem_sssr_addr;
+ }
+ break;
case SSSR_REG_INFO_VER_4 :
/* for v4 need to use sssr_all_mem_info instead of dig_mem_info */
if ((dhdp->sssr_reg_info->rev4.length >
@@ -20703,6 +20731,9 @@ dhd_sssr_mac_buf_size(dhd_pub_t *dhdp, uint8 core_idx)
/* SSSR register information structure v0 and v1 shares most except dig_mem */
if (core_idx < num_d11cores) {
switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ mac_buf_size = dhdp->sssr_reg_info->rev5.mac_regs[core_idx].sr_size;
+ break;
case SSSR_REG_INFO_VER_4 :
mac_buf_size = dhdp->sssr_reg_info->rev4.mac_regs[core_idx].sr_size;
break;
@@ -20737,6 +20768,10 @@ dhd_sssr_mac_xmtaddress(dhd_pub_t *dhdp, uint8 core_idx)
/* SSSR register information structure v0 and v1 shares most except dig_mem */
if (core_idx < num_d11cores) {
switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ xmtaddress = dhdp->sssr_reg_info->rev5.
+ mac_regs[core_idx].base_regs.xmtaddress;
+ break;
case SSSR_REG_INFO_VER_4 :
xmtaddress = dhdp->sssr_reg_info->rev4.
mac_regs[core_idx].base_regs.xmtaddress;
@@ -20775,6 +20810,10 @@ dhd_sssr_mac_xmtdata(dhd_pub_t *dhdp, uint8 core_idx)
/* SSSR register information structure v0 and v1 shares most except dig_mem */
if (core_idx < num_d11cores) {
switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ xmtdata = dhdp->sssr_reg_info->rev5.
+ mac_regs[core_idx].base_regs.xmtdata;
+ break;
case SSSR_REG_INFO_VER_4 :
xmtdata = dhdp->sssr_reg_info->rev4.
mac_regs[core_idx].base_regs.xmtdata;
@@ -20802,6 +20841,153 @@ dhd_sssr_mac_xmtdata(dhd_pub_t *dhdp, uint8 core_idx)
return xmtdata;
}
+int
+dhd_sssr_sr_asm_version(dhd_pub_t *dhdp, uint16 *sr_asm_version)
+{
+
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5:
+ *sr_asm_version = dhdp->sssr_reg_info->rev5.sr_asm_version;
+ break;
+ case SSSR_REG_INFO_VER_4:
+ case SSSR_REG_INFO_VER_3:
+ case SSSR_REG_INFO_VER_2:
+ case SSSR_REG_INFO_VER_1:
+ case SSSR_REG_INFO_VER_0:
+ break;
+ default :
+ DHD_ERROR(("%s invalid sssr_reg_ver", __FUNCTION__));
+ return BCME_UNSUPPORTED;
+ }
+
+ return BCME_OK;
+}
+
+int
+dhd_sssr_mac_war_reg(dhd_pub_t *dhdp, uint8 core_idx, uint32 *war_reg)
+{
+ uint8 num_d11cores;
+
+ num_d11cores = dhd_d11_slices_num_get(dhdp);
+
+ if (core_idx < num_d11cores) {
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5:
+ *war_reg = dhdp->sssr_reg_info->rev5.mac_regs[core_idx].war_reg;
+ break;
+ case SSSR_REG_INFO_VER_4:
+ case SSSR_REG_INFO_VER_3:
+ case SSSR_REG_INFO_VER_2:
+ case SSSR_REG_INFO_VER_1:
+ case SSSR_REG_INFO_VER_0:
+ break;
+ default :
+ DHD_ERROR(("%s invalid sssr_reg_ver", __FUNCTION__));
+ return BCME_UNSUPPORTED;
+ }
+ }
+
+ return BCME_OK;
+}
+
+int
+dhd_sssr_arm_war_reg(dhd_pub_t *dhdp, uint32 *war_reg)
+{
+
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5:
+ *war_reg = dhdp->sssr_reg_info->rev5.arm_regs.war_reg;
+ break;
+ case SSSR_REG_INFO_VER_4:
+ case SSSR_REG_INFO_VER_3:
+ case SSSR_REG_INFO_VER_2:
+ case SSSR_REG_INFO_VER_1:
+ case SSSR_REG_INFO_VER_0:
+ break;
+ default :
+ DHD_ERROR(("%s invalid sssr_reg_ver", __FUNCTION__));
+ return BCME_UNSUPPORTED;
+ }
+
+ return BCME_OK;
+}
+
+int
+dhd_sssr_saqm_war_reg(dhd_pub_t *dhdp, uint32 *war_reg)
+{
+
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5:
+ *war_reg = dhdp->sssr_reg_info->rev5.saqm_sssr_info.war_reg;
+ break;
+ case SSSR_REG_INFO_VER_4:
+ case SSSR_REG_INFO_VER_3:
+ case SSSR_REG_INFO_VER_2:
+ case SSSR_REG_INFO_VER_1:
+ case SSSR_REG_INFO_VER_0:
+ break;
+ default :
+ DHD_ERROR(("%s invalid sssr_reg_ver", __FUNCTION__));
+ return BCME_UNSUPPORTED;
+ }
+
+ return BCME_OK;
+}
+
+uint
+dhd_sssr_saqm_buf_size(dhd_pub_t *dhdp)
+{
+ uint saqm_buf_size = 0;
+
+ /* SSSR register information structure v0 and v1 shares most except dig_mem */
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5:
+ if (dhdp->sssr_reg_info->rev5.saqm_sssr_info.saqm_sssr_size > 0) {
+ saqm_buf_size =
+ dhdp->sssr_reg_info->rev5.saqm_sssr_info.saqm_sssr_size;
+ }
+ break;
+ case SSSR_REG_INFO_VER_4:
+ case SSSR_REG_INFO_VER_3:
+ case SSSR_REG_INFO_VER_2:
+ case SSSR_REG_INFO_VER_1:
+ case SSSR_REG_INFO_VER_0:
+ break;
+ default:
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ return saqm_buf_size;
+}
+
+uint
+dhd_sssr_saqm_buf_addr(dhd_pub_t *dhdp)
+{
+ uint saqm_buf_addr = 0;
+
+ /* SSSR register information structure v0 and v1 shares most except dig_mem */
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5:
+ if (dhdp->sssr_reg_info->rev5.saqm_sssr_info.saqm_sssr_size > 0) {
+ saqm_buf_addr =
+ dhdp->sssr_reg_info->rev5.saqm_sssr_info.saqm_sssr_addr;
+ }
+ break;
+ case SSSR_REG_INFO_VER_4:
+ case SSSR_REG_INFO_VER_3:
+ case SSSR_REG_INFO_VER_2:
+ case SSSR_REG_INFO_VER_1:
+ case SSSR_REG_INFO_VER_0:
+ break;
+ default:
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+
+ return saqm_buf_addr;
+}
+
#ifdef DHD_SSSR_DUMP_BEFORE_SR
int
dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len)
@@ -20884,6 +21070,7 @@ dhd_sssr_dump_to_file(dhd_info_t* dhdinfo)
uint dig_buf_size = 0;
uint8 num_d11cores = 0;
uint d11_buf_size = 0;
+ uint saqm_buf_size = 0;
DHD_PRINT(("%s: ENTER \n", __FUNCTION__));
@@ -20975,6 +21162,36 @@ dhd_sssr_dump_to_file(dhd_info_t* dhdinfo)
}
}
+ saqm_buf_size = dhd_sssr_saqm_buf_size(dhdp);
+
+#ifdef DHD_SSSR_DUMP_BEFORE_SR
+ if ((saqm_buf_size > 0) && dhdp->sssr_saqm_buf_before &&
+ (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_saqm_buf_before,
+ saqm_buf_size, "sssr_dump_saqm_before_SR")) {
+ DHD_ERROR(("%s: writing SSSR SAQM dump before to the file failed\n",
+ __FUNCTION__));
+ }
+ }
+#endif /* DHD_SSSR_DUMP_BEFORE_SR */
+
+ bzero(after_sr_dump, sizeof(after_sr_dump));
+ if (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_FIS) {
+ snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%s",
+ "sssr_dump_fis_saqm", "after_SR");
+ } else {
+ snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%s",
+ "sssr_dump_saqm", "after_SR");
+ }
+
+ if ((saqm_buf_size > 0) && dhdp->sssr_saqm_buf_after) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_saqm_buf_after,
+ saqm_buf_size, after_sr_dump)) {
+ DHD_ERROR(("%s: writing SSSR SAQM dump after to the file failed\n",
+ __FUNCTION__));
+ }
+ }
+
exit:
DHD_GENERAL_LOCK(dhdp, flags);
DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
diff --git a/dhd_linux.h b/dhd_linux.h
index 62ddd1f..4ec7e5f 100644
--- a/dhd_linux.h
+++ b/dhd_linux.h
@@ -93,8 +93,10 @@ enum coredump_types {
DHD_COREDUMP_TYPE_SSSRDUMP_DIG_AFTER = 7,
DHD_COREDUMP_TYPE_SOCRAMDUMP = 8,
#ifdef DHD_SDTC_ETB_DUMP
- DHD_COREDUMP_TYPE_SDTC_ETB_DUMP = 9
+ DHD_COREDUMP_TYPE_SDTC_ETB_DUMP = 9,
#endif /* DHD_SDTC_ETB_DUMP */
+ DHD_COREDUMP_TYPE_SSSRDUMP_SAQM_BEFORE = 10,
+ DHD_COREDUMP_TYPE_SSSRDUMP_SAQM_AFTER = 11
};
#ifdef DHD_SSSR_DUMP
diff --git a/dhd_msgbuf.c b/dhd_msgbuf.c
index 1d7fde1..df20584 100644
--- a/dhd_msgbuf.c
+++ b/dhd_msgbuf.c
@@ -777,6 +777,8 @@ typedef struct dhd_prot {
uint16 max_rxbufpost;
uint32 tot_rxbufpost;
uint32 tot_rxcpl;
+ void *rxp_bufinfo_pool; /* Scratch buffer pool to hold va, pa and pktlens */
+ uint16 rxp_bufinfo_pool_size; /* scartch buffer pool length */
uint16 max_eventbufpost;
uint16 max_ioctlrespbufpost;
uint16 max_tsbufpost;
@@ -1080,7 +1082,7 @@ static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
-static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
+static int __dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 pktid, uint32 rxcnt);
@@ -4604,6 +4606,15 @@ dhd_prot_init(dhd_pub_t *dhd)
prot->max_rxbufpost = LEGACY_MAX_RXBUFPOST;
}
prot->rx_buf_burst = (uint16)rx_buf_burst;
+
+ /* allocate a local buffer to store pkt buffer va, pa and length */
+ prot->rxp_bufinfo_pool_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
+ prot->rx_buf_burst;
+ prot->rxp_bufinfo_pool = VMALLOC(dhd->osh, prot->rxp_bufinfo_pool_size);
+ if (!prot->rxp_bufinfo_pool) {
+ DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
/*
* Rollback rx-buf_burst to RX_BUF_BURST_V1,
* if the dongle dictated max_rxbufpost is lesser than MIN_HTPUT_H2DRING_RXPOST_SIZE.
@@ -5256,6 +5267,9 @@ dhd_prot_reset(dhd_pub_t *dhd)
dhd_msgbuf_agg_h2d_db_timer_reset(dhd);
#endif /* AGG_H2D_DB */
+ if (prot->rxp_bufinfo_pool) {
+ VMFREE(dhd->osh, prot->rxp_bufinfo_pool, prot->rxp_bufinfo_pool_size);
+ }
} /* dhd_prot_reset */
#if defined(DHD_LB_RXP)
@@ -6076,7 +6090,7 @@ int dhd_sync_with_dongle(dhd_pub_t *dhd)
dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
/* Post ts buffer after shim layer is attached */
- ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
+ ret = __dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
/* query for 'wlc_ver' to get version info from firmware */
/* set the buf to zero */
@@ -6727,8 +6741,6 @@ BCMFASTPATH(dhd_prot_rxbuf_post)(dhd_pub_t *dhd, uint16 count, bool use_rsv_pkti
uint32 pktid;
dhd_prot_t *prot = dhd->prot;
msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
- void *lcl_buf;
- uint16 lcl_buf_size;
#ifdef BCM_ROUTER_DHD
prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ + BCMEXTRAHDROOM;
@@ -6739,18 +6751,9 @@ BCMFASTPATH(dhd_prot_rxbuf_post)(dhd_pub_t *dhd, uint16 count, bool use_rsv_pkti
if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus, __FUNCTION__) != BCME_OK)
return BCME_ERROR;
#endif /* PCIE_INB_DW */
- /* allocate a local buffer to store pkt buffer va, pa and length */
- lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
- prot->rx_buf_burst;
- lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
- if (!lcl_buf) {
- DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
-#ifdef PCIE_INB_DW
- dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus, __FUNCTION__);
-#endif
- return 0;
- }
- pktbuf = lcl_buf;
+
+ /* Use the rxp buffer info pool to store pa, va and pktlen of allocated buffers */
+ pktbuf = prot->rxp_bufinfo_pool;
pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * prot->rx_buf_burst);
pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * prot->rx_buf_burst);
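The hunk above stops allocating a per-call scratch buffer and instead carves three arrays (packet pointers, DMA addresses, lengths) out of the pre-allocated rxp_bufinfo_pool. A stand-alone sketch of that partitioning, with uint64_t standing in for the driver's dmaaddr_t:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned int burst = 8;   /* stand-in for prot->rx_buf_burst */
    size_t pool_size = (sizeof(void *) + sizeof(uint64_t) + sizeof(uint32_t)) * burst;
    uint8_t *pool = calloc(1, pool_size);
    if (!pool)
        return 1;

    /* Carve the single allocation into three back-to-back arrays. */
    void **pktbuf = (void **)pool;
    uint64_t *pktbuf_pa = (uint64_t *)((uint8_t *)pktbuf + sizeof(void *) * burst);
    uint32_t *pktlen = (uint32_t *)((uint8_t *)pktbuf_pa + sizeof(uint64_t) * burst);

    pktbuf[0] = pool;          /* example entries */
    pktbuf_pa[0] = 0x1000;
    pktlen[0] = 2048;
    printf("pool = %zu bytes, first len = %u\n", pool_size, pktlen[0]);
    free(pool);
    return 0;
}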
@@ -6939,7 +6942,9 @@ cleanup:
PKTFREE(dhd->osh, p, FALSE);
}
- MFREE(dhd->osh, lcl_buf, lcl_buf_size);
+ /* Zero out memory to prevent any stale access */
+ bzero(prot->rxp_bufinfo_pool, prot->rxp_bufinfo_pool_size);
+
#ifdef PCIE_INB_DW
dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus, __FUNCTION__);
#endif
@@ -7456,8 +7461,9 @@ dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
MSG_TYPE_EVENT_BUF_POST, max_to_post);
}
+/* caller should take the lock */
static int
-dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
+__dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
{
#ifdef DHD_TIMESYNC
dhd_prot_t *prot = dhd->prot;
@@ -8330,7 +8336,6 @@ BCMFASTPATH(dhd_prot_process_msgbuf_rxcpl)(dhd_pub_t *dhd, int ringtype, uint32
}
}
#endif /* DHD_HP2P */
-
#ifdef DHD_TIMESYNC
if (prot->rx_ts_log_enabled) {
dhd_pkt_parse_t parse;
@@ -9554,7 +9559,6 @@ BCMFASTPATH(dhd_prot_txstatus_process)(dhd_pub_t *dhd, void *msg)
txcpl_info->tx_history[txcpl_info->txcpl_hist_count].flowid = flowid;
txcpl_info->txcpl_hist_count =
(txcpl_info->txcpl_hist_count +1) % MAX_TXCPL_HISTORY;
-
#ifdef DHD_TIMESYNC
if (dhd->prot->tx_ts_log_enabled) {
dhd_pkt_parse_t parse;
@@ -9569,7 +9573,6 @@ BCMFASTPATH(dhd_prot_txstatus_process)(dhd_pub_t *dhd, void *msg)
ts->low, ts->high, &parse);
}
#endif /* DHD_TIMESYNC */
-
#ifdef DHD_LBUF_AUDIT
PKTAUDIT(dhd->osh, pkt);
#endif
@@ -14599,25 +14602,28 @@ dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
#ifdef DHD_TIMESYNC
host_timestamp_msg_cpl_t *host_ts_cpl;
uint32 pktid;
+ unsigned long flags = 0;
dhd_prot_t *prot = dhd->prot;
host_ts_cpl = (host_timestamp_msg_cpl_t *)buf;
DHD_INFO(("%s host TS cpl: status %d, req_ID: 0x%04x, xt_id %d \n", __FUNCTION__,
host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id));
-
pktid = ltoh32(host_ts_cpl->msg.request_id);
+ DHD_TIMESYNC_LOCK(dhd->ts_lock, flags);
if (prot->hostts_req_buf_inuse == FALSE) {
DHD_ERROR(("No Pending Host TS req, but completion\n"));
- return;
+ goto exit;
}
prot->hostts_req_buf_inuse = FALSE;
if (pktid != DHD_H2D_HOSTTS_REQ_PKTID) {
DHD_ERROR(("Host TS req CPL, but req ID different 0x%04x, exp 0x%04x\n",
pktid, DHD_H2D_HOSTTS_REQ_PKTID));
- return;
+ goto exit;
}
- dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id,
+ __dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id,
host_ts_cpl->cmplt.status);
+exit:
+ DHD_TIMESYNC_UNLOCK(dhd->ts_lock, flags);
#else /* DHD_TIMESYNC */
DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n"));
#endif /* DHD_TIMESYNC */
@@ -16876,6 +16882,7 @@ int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
}
#ifdef BCMPCIE
+#if defined(DHD_TIMESYNC)
int
dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
uint16 seqnum, uint16 xt_id)
@@ -16886,7 +16893,7 @@ dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
uint16 alloced = 0;
uchar *ts_tlv_buf;
msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
- int ret;
+ int ret = 0;
if ((tlvs == NULL) || (tlv_len == 0)) {
DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
@@ -16904,11 +16911,8 @@ dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
/* if Host TS req already pending go away */
if (prot->hostts_req_buf_inuse == TRUE) {
DHD_ERROR(("one host TS request already pending at device\n"));
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
-#ifdef PCIE_INB_DW
- dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus, __FUNCTION__);
-#endif
- return -1;
+ ret = -1;
+ goto exit;
}
/* Request for cbuf space */
@@ -16916,11 +16920,8 @@ dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
if (ts_req == NULL) {
DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
-#ifdef PCIE_INB_DW
- dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus, __FUNCTION__);
-#endif
- return -1;
+ ret = -1;
+ goto exit;
}
/* Common msg buf hdr */
@@ -16945,7 +16946,8 @@ dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
if (ret) {
DHD_ERROR(("copy ioct payload failed:%d, destsz:%d, n:%d\n",
ret, prot->hostts_req_buf.len, tlv_len));
- return BCME_ERROR;
+ ret = BCME_ERROR;
+ goto exit;
}
OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
@@ -16961,15 +16963,15 @@ dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
/* upd wrt ptr and raise interrupt */
dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
-
+exit:
DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
#ifdef PCIE_INB_DW
dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus, __FUNCTION__);
#endif
- return 0;
+ return ret;
} /* dhd_prot_send_host_timestamp */
-
+#endif /* DHD_TIMESYNC */
bool
dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
@@ -17036,10 +17038,10 @@ dhd_prot_dma_indx_free(dhd_pub_t *dhd)
}
void
-dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
+__dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
{
if (dhd->prot->max_tsbufpost > 0)
- dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
+ __dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
}
static void
@@ -17047,9 +17049,10 @@ BCMFASTPATH(dhd_prot_process_fw_timestamp)(dhd_pub_t *dhd, void* buf)
{
#ifdef DHD_TIMESYNC
fw_timestamp_event_msg_t *resp;
+ void * pkt;
uint32 pktid;
uint16 buflen, seqnum;
- void * pkt;
+ unsigned long flags = 0;
resp = (fw_timestamp_event_msg_t *)buf;
pktid = ltoh32(resp->msg.request_id);
@@ -17060,20 +17063,20 @@ BCMFASTPATH(dhd_prot_process_fw_timestamp)(dhd_pub_t *dhd, void* buf)
DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid,
DHD_DUPLICATE_FREE);
#endif /* DHD_PKTID_AUDIT_RING */
-
DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d\n",
pktid, buflen, resp->msg.flags, ltoh16(resp->seqnum)));
+ DHD_TIMESYNC_LOCK(dhd->ts_lock, flags);
if (!dhd->prot->cur_ts_bufs_posted) {
DHD_ERROR(("tsbuf posted are zero, but there is a completion\n"));
- return;
+ goto exit;
}
dhd->prot->cur_ts_bufs_posted--;
if (!dhd_timesync_delay_post_bufs(dhd)) {
if (dhd->prot->max_tsbufpost > 0) {
- dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
+ __dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
}
}
@@ -17081,12 +17084,14 @@ BCMFASTPATH(dhd_prot_process_fw_timestamp)(dhd_pub_t *dhd, void* buf)
if (!pkt) {
DHD_ERROR(("no ts buffer associated with pktid 0x%04x\n", pktid));
- return;
+ goto exit;
}
PKTSETLEN(dhd->osh, pkt, buflen);
- dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum);
+ __dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum);
PKTFREE_CTRLBUF(dhd->osh, pkt, TRUE);
+exit:
+ DHD_TIMESYNC_UNLOCK(dhd->ts_lock, flags);
#else /* DHD_TIMESYNC */
DHD_ERROR(("Timesunc feature not compiled in but GOT FW TS message\n"));
#endif /* DHD_TIMESYNC */
diff --git a/dhd_pcie.c b/dhd_pcie.c
index ef31f13..74fecb2 100644
--- a/dhd_pcie.c
+++ b/dhd_pcie.c
@@ -3619,7 +3619,7 @@ dhdpcie_bus_release(dhd_bus_t *bus)
/* For STB, it is causing kernel panic during reboot if RC is kept in off
* state, so keep the RC in ON state
*/
- DHD_PRINT(("%s: Keep EP powered on during rmmod to avoid Kernel Panic\n",
+ DHD_PRINT(("%s: Keep EP powered on during rmmod for STB boards\n",
__FUNCTION__));
dhd_wifi_platform_set_power(bus->dhd, TRUE);
dhdpcie_bus_start_host_dev(bus);
@@ -17577,6 +17577,8 @@ dhdpcie_chipmatch(uint16 vendor, uint16 device)
case BCM4383_D11AX_ID:
case BCM4390_CHIP_GRPID:
case BCM4390_D11BE_ID:
+ case BCM4399_CHIP_GRPID:
+ case BCM4399_D11BE_ID:
return 0;
default:
#ifndef DHD_EFI
@@ -19094,6 +19096,12 @@ dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
dig_mem_check = FALSE;
/* SSSR register information structure v0 and v1 shares most except dig_mem */
switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ if ((dhd->sssr_reg_info->rev5.length > OFFSETOF(sssr_reg_info_v5_t,
+ dig_mem_info)) && dhd->sssr_reg_info->rev5.dig_mem_info.dig_sssr_size) {
+ dig_mem_check = TRUE;
+ }
+ break;
case SSSR_REG_INFO_VER_4 :
if ((dhd->sssr_reg_info->rev4.length > OFFSETOF(sssr_reg_info_v4_t,
dig_mem_info)) && dhd->sssr_reg_info->rev4.dig_mem_info.dig_sssr_size) {
@@ -19194,6 +19202,63 @@ dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
return BCME_OK;
}
+static int
+dhdpcie_get_sssr_saqm_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
+ uint addr_reg)
+{
+ bool saqm_sssr_check;
+
+ DHD_PRINT(("%s addr_reg=0x%x size=0x%x\n", __FUNCTION__, addr_reg, fifo_size));
+
+ if (!buf) {
+ DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (!fifo_size) {
+ DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ saqm_sssr_check = FALSE;
+ /* SSSR register information structure v0 and v1 shares most except dig_mem */
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5:
+ if ((dhd->sssr_reg_info->rev5.length > OFFSETOF(sssr_reg_info_v5_t,
+ saqm_sssr_info)) && dhd->sssr_reg_info->rev5.saqm_sssr_info.
+ saqm_sssr_size) {
+ saqm_sssr_check = TRUE;
+ }
+ break;
+ case SSSR_REG_INFO_VER_4:
+ case SSSR_REG_INFO_VER_3:
+ case SSSR_REG_INFO_VER_2:
+ case SSSR_REG_INFO_VER_1:
+ case SSSR_REG_INFO_VER_0:
+ break;
+ default:
+ DHD_ERROR(("invalid sssr_reg_ver"));
+ return BCME_UNSUPPORTED;
+ }
+ if (addr_reg) {
+ DHD_PRINT(("saqm_sssr_check=%d\n", saqm_sssr_check));
+ if (saqm_sssr_check) {
+ int err = dhdpcie_bus_membytes(dhd->bus, FALSE, DHD_PCIE_MEM_BAR1, addr_reg,
+ (uint8 *)buf, fifo_size);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s: Error reading saqm dump from dongle !\n",
+ __FUNCTION__));
+ }
+ } else {
+ return BCME_UNSUPPORTED;
+ }
+ } else {
+ return BCME_UNSUPPORTED;
+ }
+
+ return BCME_OK;
+}
+
#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
void
dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
@@ -19312,6 +19377,9 @@ dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ /* Handled using MaxRsrcMask for rev5 and above */
+ goto exit;
case SSSR_REG_INFO_VER_4 :
addr = dhd->sssr_reg_info->rev4.chipcommon_regs.base_regs.powerctrl;
powerctrl_mask = dhd->sssr_reg_info->rev4.
@@ -19341,6 +19409,7 @@ dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
if (!(val & powerctrl_mask)) {
dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
}
+exit:
return BCME_OK;
}
@@ -19405,8 +19474,8 @@ dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
pmuintmask1 = dhd->sssr_reg_info->rev4.pmu_regs.base_regs.pmuintmask1;
resreqtimer = dhd->sssr_reg_info->rev4.pmu_regs.base_regs.resreqtimer;
macresreqtimer = dhd->sssr_reg_info->rev4.pmu_regs.base_regs.macresreqtimer;
- macresreqtimer1 = dhd->sssr_reg_info->rev4.
- pmu_regs.base_regs.macresreqtimer1;
+ macresreqtimer1 = dhd->sssr_reg_info->rev4.pmu_regs.
+ base_regs.macresreqtimer1;
break;
case SSSR_REG_INFO_VER_3 :
/* intentional fall through */
@@ -19528,7 +19597,6 @@ exit:
static int
dhdpcie_saqm_clear_clk_req(dhd_pub_t *dhdp)
{
- sssr_reg_info_v4_t *sssr_reg_info = &dhdp->sssr_reg_info->rev4;
uint32 clockcontrolstatus_val = 0, clockcontrolstatus = 0, saqm_extrsrcreq = 0;
uint32 digsr_srcontrol2_addr = 0, pmuchip_ctl_addr_reg = 0, pmuchip_ctl_data_reg = 0;
uint32 digsr_srcontrol2_setbit_val = 0, pmuchip_ctl_val = 0, pmuchip_ctl_setbit_val = 0;
@@ -19542,54 +19610,188 @@ dhdpcie_saqm_clear_clk_req(dhd_pub_t *dhdp)
}
DHD_PRINT(("%s\n", __FUNCTION__));
- saqm_extrsrcreq = sssr_reg_info->saqm_sssr_info.oobr_regs.extrsrcreq;
- if (saqm_extrsrcreq) {
- /* read is for information purpose only. */
- dhd_sbreg_op(dhdp, saqm_extrsrcreq, &clockcontrolstatus_val, TRUE);
- clockcontrolstatus = sssr_reg_info->saqm_sssr_info.base_regs.clockcontrolstatus;
- dhd_sbreg_op(dhdp, clockcontrolstatus, &clockcontrolstatus_val, TRUE);
- clockcontrolstatus_val |=
- sssr_reg_info->saqm_sssr_info.base_regs.clockcontrolstatus_val;
-
- dhd_sbreg_op(dhdp, clockcontrolstatus, &clockcontrolstatus_val, FALSE);
- OSL_DELAY(SAQM_CLK_REQ_CLR_DELAY);
- }
-
- /* set DIG force_sr_all bit */
- digsr_srcontrol2_addr =
- sssr_reg_info->saqm_sssr_info.sssr_config_regs.digsr_srcontrol2_addr;
- if (digsr_srcontrol2_addr) {
- dhd_sbreg_op(dhdp, digsr_srcontrol2_addr, &val, TRUE);
- digsr_srcontrol2_setbit_val =
- sssr_reg_info->saqm_sssr_info.sssr_config_regs.digsr_srcontrol2_setbit_val;
- val |= digsr_srcontrol2_setbit_val;
- dhd_sbreg_op(dhdp, digsr_srcontrol2_addr, &val, FALSE);
- }
-
- /* Disable SR self test */
- digsr_srcontrol1_addr =
- sssr_reg_info->saqm_sssr_info.sssr_config_regs.digsr_srcontrol1_addr;
- digsr_srcontrol1_clrbit_val =
- sssr_reg_info->saqm_sssr_info.sssr_config_regs.digsr_srcontrol1_clrbit_val;
- if (digsr_srcontrol1_addr) {
- dhd_sbreg_op(dhdp, digsr_srcontrol1_addr, &val, TRUE);
- val &= ~(digsr_srcontrol1_clrbit_val);
- dhd_sbreg_op(dhdp, digsr_srcontrol1_addr, &val, FALSE);
- }
-
- /* set PMU chip ctrl saqm_sr_enable bit */
- pmuchip_ctl_addr_reg = sssr_reg_info->saqm_sssr_info.sssr_config_regs.pmuchip_ctl_addr_reg;
- pmuchip_ctl_val = sssr_reg_info->saqm_sssr_info.sssr_config_regs.pmuchip_ctl_val;
- if (pmuchip_ctl_addr_reg) {
- dhd_sbreg_op(dhdp, pmuchip_ctl_addr_reg, &pmuchip_ctl_val, FALSE);
- }
- pmuchip_ctl_data_reg = sssr_reg_info->saqm_sssr_info.sssr_config_regs.pmuchip_ctl_data_reg;
- pmuchip_ctl_setbit_val =
- sssr_reg_info->saqm_sssr_info.sssr_config_regs.pmuchip_ctl_setbit_val;
- if (pmuchip_ctl_data_reg) {
- dhd_sbreg_op(dhdp, pmuchip_ctl_data_reg, &val, TRUE);
- val |= pmuchip_ctl_setbit_val;
- dhd_sbreg_op(dhdp, pmuchip_ctl_data_reg, &val, FALSE);
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ saqm_extrsrcreq = dhdp->sssr_reg_info->rev5.saqm_sssr_info.
+ oobr_regs.extrsrcreq;
+ if (saqm_extrsrcreq) {
+ /* read is for information purpose only. */
+ dhd_sbreg_op(dhdp, saqm_extrsrcreq, &clockcontrolstatus_val, TRUE);
+ clockcontrolstatus = dhdp->sssr_reg_info->rev5.saqm_sssr_info.
+ base_regs.clockcontrolstatus;
+ dhd_sbreg_op(dhdp, clockcontrolstatus,
+ &clockcontrolstatus_val, TRUE);
+ clockcontrolstatus_val |=
+ dhdp->sssr_reg_info->rev5.saqm_sssr_info.
+ base_regs.clockcontrolstatus_val;
+ dhd_sbreg_op(dhdp, clockcontrolstatus, &clockcontrolstatus_val,
+ FALSE);
+ OSL_DELAY(SAQM_CLK_REQ_CLR_DELAY);
+ }
+ /* set DIG force_sr_all bit */
+ digsr_srcontrol2_addr =
+ dhdp->sssr_reg_info->rev5.saqm_sssr_info.sssr_config_regs.
+ digsr_srcontrol2_addr;
+ if (digsr_srcontrol2_addr) {
+ dhd_sbreg_op(dhdp, digsr_srcontrol2_addr, &val, TRUE);
+ digsr_srcontrol2_setbit_val =
+ dhdp->sssr_reg_info->rev5.saqm_sssr_info.sssr_config_regs.
+ digsr_srcontrol2_setbit_val;
+ val |= digsr_srcontrol2_setbit_val;
+ dhd_sbreg_op(dhdp, digsr_srcontrol2_addr, &val, FALSE);
+ }
+
+ /* Disable SR self test */
+ digsr_srcontrol1_addr =
+ dhdp->sssr_reg_info->rev5.saqm_sssr_info.sssr_config_regs.
+ digsr_srcontrol1_addr;
+ digsr_srcontrol1_clrbit_val =
+ dhdp->sssr_reg_info->rev5.saqm_sssr_info.sssr_config_regs.
+ digsr_srcontrol1_clrbit_val;
+ if (digsr_srcontrol1_addr) {
+ dhd_sbreg_op(dhdp, digsr_srcontrol1_addr, &val, TRUE);
+ val &= ~(digsr_srcontrol1_clrbit_val);
+ dhd_sbreg_op(dhdp, digsr_srcontrol1_addr, &val, FALSE);
+ }
+
+ /* set PMU chip ctrl saqm_sr_enable bit */
+ pmuchip_ctl_addr_reg = dhdp->sssr_reg_info->rev5.saqm_sssr_info.
+ sssr_config_regs.pmuchip_ctl_addr_reg;
+ pmuchip_ctl_val = dhdp->sssr_reg_info->rev5.saqm_sssr_info.
+ sssr_config_regs.pmuchip_ctl_val;
+ if (pmuchip_ctl_addr_reg) {
+ dhd_sbreg_op(dhdp, pmuchip_ctl_addr_reg, &pmuchip_ctl_val, FALSE);
+ }
+ pmuchip_ctl_data_reg = dhdp->sssr_reg_info->rev5.saqm_sssr_info.
+ sssr_config_regs.pmuchip_ctl_data_reg;
+ pmuchip_ctl_setbit_val =
+ dhdp->sssr_reg_info->rev5.saqm_sssr_info.sssr_config_regs.
+ pmuchip_ctl_setbit_val;
+ if (pmuchip_ctl_data_reg) {
+ dhd_sbreg_op(dhdp, pmuchip_ctl_data_reg, &val, TRUE);
+ val |= pmuchip_ctl_setbit_val;
+ dhd_sbreg_op(dhdp, pmuchip_ctl_data_reg, &val, FALSE);
+ }
+ break;
+ case SSSR_REG_INFO_VER_4 :
+ saqm_extrsrcreq = dhdp->sssr_reg_info->rev4.saqm_sssr_info.
+ oobr_regs.extrsrcreq;
+ if (saqm_extrsrcreq) {
+ /* read is for information purpose only. */
+ dhd_sbreg_op(dhdp, saqm_extrsrcreq, &clockcontrolstatus_val, TRUE);
+ clockcontrolstatus = dhdp->sssr_reg_info->rev4.saqm_sssr_info.
+ base_regs.clockcontrolstatus;
+ dhd_sbreg_op(dhdp, clockcontrolstatus, &clockcontrolstatus_val,
+ TRUE);
+ clockcontrolstatus_val |=
+ dhdp->sssr_reg_info->rev4.saqm_sssr_info.
+ base_regs.clockcontrolstatus_val;
+
+ dhd_sbreg_op(dhdp, clockcontrolstatus, &clockcontrolstatus_val,
+ FALSE);
+ OSL_DELAY(SAQM_CLK_REQ_CLR_DELAY);
+ }
+
+ /* set DIG force_sr_all bit */
+ digsr_srcontrol2_addr =
+ dhdp->sssr_reg_info->rev4.saqm_sssr_info.sssr_config_regs.
+ digsr_srcontrol2_addr;
+ if (digsr_srcontrol2_addr) {
+ dhd_sbreg_op(dhdp, digsr_srcontrol2_addr, &val, TRUE);
+ digsr_srcontrol2_setbit_val =
+ dhdp->sssr_reg_info->rev4.saqm_sssr_info.sssr_config_regs.
+ digsr_srcontrol2_setbit_val;
+ val |= digsr_srcontrol2_setbit_val;
+ dhd_sbreg_op(dhdp, digsr_srcontrol2_addr, &val, FALSE);
+ }
+
+ /* Disable SR self test */
+ digsr_srcontrol1_addr =
+ dhdp->sssr_reg_info->rev4.saqm_sssr_info.sssr_config_regs.
+ digsr_srcontrol1_addr;
+ digsr_srcontrol1_clrbit_val =
+ dhdp->sssr_reg_info->rev4.saqm_sssr_info.sssr_config_regs.
+ digsr_srcontrol1_clrbit_val;
+ if (digsr_srcontrol1_addr) {
+ dhd_sbreg_op(dhdp, digsr_srcontrol1_addr, &val, TRUE);
+ val &= ~(digsr_srcontrol1_clrbit_val);
+ dhd_sbreg_op(dhdp, digsr_srcontrol1_addr, &val, FALSE);
+ }
+
+ /* set PMU chip ctrl saqm_sr_enable bit */
+ pmuchip_ctl_addr_reg = dhdp->sssr_reg_info->rev4.saqm_sssr_info.
+ sssr_config_regs.pmuchip_ctl_addr_reg;
+ pmuchip_ctl_val = dhdp->sssr_reg_info->rev4.saqm_sssr_info.
+ sssr_config_regs.pmuchip_ctl_val;
+ if (pmuchip_ctl_addr_reg) {
+ dhd_sbreg_op(dhdp, pmuchip_ctl_addr_reg, &pmuchip_ctl_val, FALSE);
+ }
+ pmuchip_ctl_data_reg = dhdp->sssr_reg_info->rev4.saqm_sssr_info.
+ sssr_config_regs.pmuchip_ctl_data_reg;
+ pmuchip_ctl_setbit_val =
+ dhdp->sssr_reg_info->rev4.saqm_sssr_info.sssr_config_regs.
+ pmuchip_ctl_setbit_val;
+ if (pmuchip_ctl_data_reg) {
+ dhd_sbreg_op(dhdp, pmuchip_ctl_data_reg, &val, TRUE);
+ val |= pmuchip_ctl_setbit_val;
+ dhd_sbreg_op(dhdp, pmuchip_ctl_data_reg, &val, FALSE);
+ }
+ break;
+ default :
+			DHD_ERROR(("invalid sssr_reg_ver\n"));
+			si_setcoreidx(dhdp->bus->sih, save_idx);
+			return BCME_UNSUPPORTED;
+ }
+exit:
+ si_setcoreidx(dhdp->bus->sih, save_idx);
+ return BCME_OK;
+}
+
+static int
+dhdpcie_saqm_clear_force_sr_all(dhd_pub_t *dhdp)
+{
+ uint32 val = 0, digsr_srcontrol2_addr = 0, digsr_srcontrol2_setbit_val = 0;
+ uint save_idx = si_coreidx(dhdp->bus->sih);
+
+ if ((si_setcore(dhdp->bus->sih, D11_SAQM_CORE_ID, 0) == NULL) ||
+ !si_iscoreup(dhdp->bus->sih)) {
+ goto exit;
+ }
+
+ DHD_PRINT(("%s\n", __FUNCTION__));
+ switch (dhdp->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5:
+ /* clear DIG force_sr_all bit */
+ digsr_srcontrol2_addr =
+ dhdp->sssr_reg_info->rev5.saqm_sssr_info.sssr_config_regs.
+ digsr_srcontrol2_addr;
+ if (digsr_srcontrol2_addr) {
+ dhd_sbreg_op(dhdp, digsr_srcontrol2_addr, &val, TRUE);
+ digsr_srcontrol2_setbit_val =
+ dhdp->sssr_reg_info->rev5.saqm_sssr_info.sssr_config_regs.
+ digsr_srcontrol2_setbit_val;
+ val &= ~digsr_srcontrol2_setbit_val;
+ dhd_sbreg_op(dhdp, digsr_srcontrol2_addr, &val, FALSE);
+ }
+
+ break;
+ case SSSR_REG_INFO_VER_4:
+ /* clear DIG force_sr_all bit */
+ digsr_srcontrol2_addr =
+ dhdp->sssr_reg_info->rev4.saqm_sssr_info.sssr_config_regs.
+ digsr_srcontrol2_addr;
+ if (digsr_srcontrol2_addr) {
+ dhd_sbreg_op(dhdp, digsr_srcontrol2_addr, &val, TRUE);
+ digsr_srcontrol2_setbit_val =
+ dhdp->sssr_reg_info->rev4.saqm_sssr_info.sssr_config_regs.
+ digsr_srcontrol2_setbit_val;
+ val &= ~digsr_srcontrol2_setbit_val;
+ dhd_sbreg_op(dhdp, digsr_srcontrol2_addr, &val, FALSE);
+ }
+
+ break;
+ default:
+		DHD_ERROR(("invalid sssr_reg_ver\n"));
+		si_setcoreidx(dhdp->bus->sih, save_idx);
+		return BCME_UNSUPPORTED;
}
exit:
si_setcoreidx(dhdp->bus->sih, save_idx);
@@ -19668,6 +19870,14 @@ dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ clockrequeststatus = dhd->sssr_reg_info->rev5.
+ arm_regs.oobr_regs.extrsrcreq;
+ clockcontrolstatus = dhd->sssr_reg_info->rev5.
+ arm_regs.base_regs.clockcontrolstatus;
+ clockcontrolstatus_val = dhd->sssr_reg_info->rev5.
+ arm_regs.base_regs.clockcontrolstatus_val;
+ break;
case SSSR_REG_INFO_VER_4 :
clockrequeststatus = dhd->sssr_reg_info->rev4.
arm_regs.oobr_regs.extrsrcreq;
@@ -19829,6 +20039,9 @@ dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ addr = dhd->sssr_reg_info->rev5.pcie_regs.base_regs.ltrstate;
+ break;
case SSSR_REG_INFO_VER_4 :
addr = dhd->sssr_reg_info->rev4.pcie_regs.base_regs.ltrstate;
break;
@@ -19931,13 +20144,152 @@ dhdpcie_bring_saqm_updown(dhd_pub_t *dhdp, bool down)
return BCME_OK;
}
+static void
+dhdpcie_sssr_common_header(dhd_pub_t *dhd, sssr_header_t *sssr_header)
+{
+ int ret = 0;
+ uint16 sr_asm_version;
+
+ sssr_header->magic = SSSR_HEADER_MAGIC;
+ ret = dhd_sssr_sr_asm_version(dhd, &sr_asm_version);
+ if (ret == BCME_OK) {
+ sssr_header->sr_version = sr_asm_version;
+ }
+ sssr_header->header_len =
+ OFFSETOF(sssr_header_t, flags) - OFFSETOF(sssr_header_t, header_len);
+ sssr_header->chipid = dhd_bus_chip(dhd->bus);
+ sssr_header->chiprev = dhd_bus_chiprev(dhd->bus);
+
+}
+
+static int
+dhdpcie_sssr_d11_header(dhd_pub_t *dhd, uint *buf, uint32 data_len, uint16 coreunit)
+{
+ int len = 0;
+ int ret = 0;
+
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ {
+ sssr_header_t sssr_header;
+ uint32 war_reg = 0;
+ bzero(&sssr_header, sizeof(sssr_header_t));
+ dhdpcie_sssr_common_header(dhd, &sssr_header);
+ sssr_header.data_len = data_len;
+ sssr_header.coreid = D11_CORE_ID;
+ sssr_header.coreunit = coreunit;
+ ret = dhd_sssr_mac_war_reg(dhd, coreunit, &war_reg);
+ if (ret == BCME_OK) {
+ sssr_header.war_reg = war_reg;
+ }
+ (void)memcpy_s(buf, data_len, &sssr_header, sizeof(sssr_header_t));
+ len = sizeof(sssr_header_t);
+ }
+ break;
+ default :
+ len = 0;
+ }
+
+ return len;
+}
+
+static int
+dhdpcie_sssr_dig_header(dhd_pub_t *dhd, uint *buf, uint32 data_len)
+{
+ int len = 0;
+ int ret = 0;
+
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5 :
+ {
+ sssr_header_t sssr_header;
+ uint32 war_reg = 0;
+ bzero(&sssr_header, sizeof(sssr_header_t));
+ dhdpcie_sssr_common_header(dhd, &sssr_header);
+ sssr_header.data_len = data_len;
+ sssr_header.coreid = dhd->bus->coreid;
+ ret = dhd_sssr_arm_war_reg(dhd, &war_reg);
+ if (ret == BCME_OK) {
+ sssr_header.war_reg = war_reg;
+ }
+ (void)memcpy_s(buf, data_len, &sssr_header, sizeof(sssr_header_t));
+ len = sizeof(sssr_header_t);
+ }
+ break;
+ default :
+ len = 0;
+ }
+
+ return len;
+}
+
+static int
+dhdpcie_sssr_saqm_header(dhd_pub_t *dhd, uint *buf, uint32 data_len)
+{
+ int len = 0;
+ int ret = 0;
+
+ switch (dhd->sssr_reg_info->rev2.version) {
+ case SSSR_REG_INFO_VER_5:
+ {
+ sssr_header_t sssr_header;
+ uint32 war_reg = 0;
+ bzero(&sssr_header, sizeof(sssr_header_t));
+ dhdpcie_sssr_common_header(dhd, &sssr_header);
+ sssr_header.data_len = data_len;
+ sssr_header.coreid = D11_SAQM_CORE_ID;
+ ret = dhd_sssr_saqm_war_reg(dhd, &war_reg);
+ if (ret == BCME_OK) {
+ sssr_header.war_reg = war_reg;
+ }
+ (void)memcpy_s(buf, data_len, &sssr_header, sizeof(sssr_header_t));
+ len = sizeof(sssr_header_t);
+ }
+ break;
+ default:
+ len = 0;
+ }
+
+ return len;
+}
+
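+/* Note (summary of the header builders above): dhdpcie_sssr_d11_header,
+ * dhdpcie_sssr_dig_header and dhdpcie_sssr_saqm_header emit an sssr_header_t
+ * only for SSSR_REG_INFO_VER_5 and later; for older register-info versions
+ * they return 0, so those dump buffers keep their original headerless layout.
+ */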
+static bool
+dhdpcie_saqm_check_outofreset(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ uint save_idx, save_unit;
+ uint saqm_buf_size = 0;
+ bool ret = FALSE;
+
+ save_idx = si_coreidx(bus->sih);
+ save_unit = si_coreunit(bus->sih);
+
+ saqm_buf_size = dhd_sssr_saqm_buf_size(dhdp);
+
+ if ((saqm_buf_size > 0) && si_setcore(bus->sih, D11_SAQM_CORE_ID, 0)) {
+ ret = si_iscoreup(bus->sih);
+		DHD_PRINT(("%s: si_iscoreup %d\n", __FUNCTION__, ret));
+ si_setcore(bus->sih, save_idx, save_unit);
+ }
+
+ return ret;
+}
+
#ifdef DHD_SSSR_DUMP_BEFORE_SR
static int
dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
{
int i;
- uint32 sr_size, xmtaddress, xmtdata, dig_buf_size, dig_buf_addr;
+ uint32 sr_size, xmtaddress, xmtdata, dig_buf_size,
+ dig_buf_addr, saqm_buf_size, saqm_buf_addr;
uint8 num_d11cores;
+ uint32 d11_header_len = 0;
+ uint32 dig_header_len = 0;
+ uint32 saqm_header_len = 0;
+ uint *d11_buffer;
+ uint *dig_buffer;
+ uint *saqm_buffer;
DHD_PRINT(("%s\n", __FUNCTION__));
@@ -19948,17 +20300,34 @@ dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
sr_size = dhd_sssr_mac_buf_size(dhd, i);
xmtaddress = dhd_sssr_mac_xmtaddress(dhd, i);
xmtdata = dhd_sssr_mac_xmtdata(dhd, i);
- dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
- sr_size, xmtaddress, xmtdata);
+ d11_buffer = dhd->sssr_d11_before[i];
+ d11_header_len = dhdpcie_sssr_d11_header(dhd, d11_buffer, sr_size, i);
+ /* D11 buffer starts right after sssr d11 header */
+ d11_buffer = (uint *)((char *)d11_buffer + d11_header_len);
+ dhdpcie_get_sssr_fifo_dump(dhd, d11_buffer, sr_size, xmtaddress, xmtdata);
}
}
dig_buf_size = dhd_sssr_dig_buf_size(dhd);
dig_buf_addr = dhd_sssr_dig_buf_addr(dhd);
if (dig_buf_size) {
- dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
- dig_buf_size, dig_buf_addr);
+ dig_buffer = dhd->sssr_dig_buf_before;
+ dig_header_len = dhdpcie_sssr_dig_header(dhd, dig_buffer, dig_buf_size);
+ /* Dig buffer starts right after sssr dig header */
+ dig_buffer = (uint *)((char *)dig_buffer + dig_header_len);
+ dhdpcie_get_sssr_dig_dump(dhd, dig_buffer, dig_buf_size, dig_buf_addr);
}
+
+ saqm_buf_size = dhd_sssr_saqm_buf_size(dhd);
+ saqm_buf_addr = dhd_sssr_saqm_buf_addr(dhd);
+ if (saqm_buf_size) {
+ saqm_buffer = dhd->sssr_saqm_buf_before;
+ saqm_header_len = dhdpcie_sssr_saqm_header(dhd, saqm_buffer, saqm_buf_size);
+ /* saqm buffer starts right after saqm header */
+ saqm_buffer = (uint *)((char *)saqm_buffer + saqm_header_len);
+ dhdpcie_get_sssr_saqm_dump(dhd, saqm_buffer, saqm_buf_size, saqm_buf_addr);
+ }
+
return BCME_OK;
}
#endif /* DHD_SSSR_DUMP_BEFORE_SR */
@@ -19967,8 +20336,16 @@ static int
dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
{
int i;
- uint32 sr_size, xmtaddress, xmtdata, dig_buf_size, dig_buf_addr;
+ uint32 sr_size, xmtaddress, xmtdata, dig_buf_size,
+ dig_buf_addr, saqm_buf_size, saqm_buf_addr;
+
uint8 num_d11cores;
+ uint32 d11_header_len = 0;
+ uint32 dig_header_len = 0;
+ uint32 saqm_header_len = 0;
+ uint *d11_buffer;
+ uint *dig_buffer;
+ uint *saqm_buffer;
DHD_PRINT(("%s\n", __FUNCTION__));
@@ -19979,15 +20356,32 @@ dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
sr_size = dhd_sssr_mac_buf_size(dhd, i);
xmtaddress = dhd_sssr_mac_xmtaddress(dhd, i);
xmtdata = dhd_sssr_mac_xmtdata(dhd, i);
- dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
- sr_size, xmtaddress, xmtdata);
+ d11_buffer = dhd->sssr_d11_after[i];
+ d11_header_len = dhdpcie_sssr_d11_header(dhd, d11_buffer, sr_size, i);
+ /* D11 buffer starts right after sssr d11 header */
+ d11_buffer = (uint *)((char *)d11_buffer + d11_header_len);
+ dhdpcie_get_sssr_fifo_dump(dhd, d11_buffer, sr_size, xmtaddress, xmtdata);
}
}
dig_buf_size = dhd_sssr_dig_buf_size(dhd);
dig_buf_addr = dhd_sssr_dig_buf_addr(dhd);
if (dig_buf_size) {
- dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after, dig_buf_size, dig_buf_addr);
+ dig_buffer = dhd->sssr_dig_buf_after;
+ dig_header_len = dhdpcie_sssr_dig_header(dhd, dig_buffer, dig_buf_size);
+ /* Dig buffer starts right after sssr dig header */
+ dig_buffer = (uint *)((char *)dig_buffer + dig_header_len);
+ dhdpcie_get_sssr_dig_dump(dhd, dig_buffer, dig_buf_size, dig_buf_addr);
+ }
+
+ saqm_buf_size = dhd_sssr_saqm_buf_size(dhd);
+ saqm_buf_addr = dhd_sssr_saqm_buf_addr(dhd);
+ if (saqm_buf_size) {
+ saqm_buffer = dhd->sssr_saqm_buf_after;
+ saqm_header_len = dhdpcie_sssr_saqm_header(dhd, saqm_buffer, saqm_buf_size);
+ /* saqm buffer starts right after saqm header */
+ saqm_buffer = (uint *)((char *)saqm_buffer + saqm_header_len);
+ dhdpcie_get_sssr_saqm_dump(dhd, saqm_buffer, saqm_buf_size, saqm_buf_addr);
}
return BCME_OK;
@@ -20047,6 +20441,10 @@ dhdpcie_validate_gci_chip_intstatus(dhd_pub_t *dhd)
#define OOBR_DMP_D11_MAIN 0x1u
#define OOBR_DMP_D11_AUX 0x2u
#define OOBR_DMP_D11_SCAN 0x4u
+
+#define OOBR_CAP2_NUMTOPEXTRSRC_MASK 0x1Fu
+#define OOBR_CAP2_NUMTOPEXTRSRC_SHIFT 4u /* Bits 8:4 */
+
static void
dhdpcie_dump_oobr(dhd_pub_t *dhd, uint core_bmap, uint coreunit_bmap)
{
@@ -20064,14 +20462,31 @@ dhdpcie_dump_oobr(dhd_pub_t *dhd, uint core_bmap, uint coreunit_bmap)
curcore = si_coreid(dhd->bus->sih);
if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
+ uint corecap2 = R_REG(dhd->osh, &reg->capability2);
+ uint numtopextrsrc = (corecap2 >> OOBR_CAP2_NUMTOPEXTRSRC_SHIFT) &
+ OOBR_CAP2_NUMTOPEXTRSRC_MASK;
+ /*
+ * Convert the value (8:4) to a loop count to dump topextrsrcmap.
+ * TopRsrcDestSel0 is accessible if NUM_TOP_EXT_RSRC > 0
+ * TopRsrcDestSel1 is accessible if NUM_TOP_EXT_RSRC > 4
+ * TopRsrcDestSel2 is accessible if NUM_TOP_EXT_RSRC > 8
+ * TopRsrcDestSel3 is accessible if NUM_TOP_EXT_RSRC > 12
+ * 0 --> 0
+ * 1-3 --> 1 (TopRsrcDestSel0)
+ * 4-7 --> 2 (TopRsrcDestSel1/0)
+ * 8 - 11 --> 3 (TopRsrcDestSel2/1/0)
+ * 12 - 15 --> 4 (TopRsrcDestSel3/2/1/0)
+ */
+ numtopextrsrc = numtopextrsrc ? (numtopextrsrc / 4) + 1 : numtopextrsrc;
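+		/* Illustrative example (values assumed): corecap2 = 0x00000080 gives
+		 * NUM_TOP_EXT_RSRC = (0x80 >> 4) & 0x1F = 8, which maps to a loop
+		 * count of (8 / 4) + 1 = 3, i.e. TopRsrcDestSel0..2 are dumped.
+		 */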
+ DHD_PRINT(("reg: corecap2:0x%x numtopextrsrc: %d\n", corecap2, numtopextrsrc));
+ for (i = 0; i < numtopextrsrc; ++i) {
+ val = R_REG(dhd->osh, &reg->topextrsrcmap[i]);
+ DHD_PRINT(("reg: hndoobr_reg->topextrsrcmap[%d] = 0x%x\n", i, val));
+ }
for (i = 0; i < 4; ++i) {
val = R_REG(dhd->osh, &reg->intstatus[i]);
DHD_PRINT(("reg: hndoobr_reg->intstatus[%d] = 0x%x\n", i, val));
}
- for (i = 0; i < 4; ++i) {
- val = R_REG(dhd->osh, &reg->topextrsrcmap[i]);
- DHD_PRINT(("reg: hndoobr_reg->topextrsrcmap[%d] = 0x%x\n", i, val));
- }
if (core_bmap & OOBR_DMP_FOR_D11) {
for (i = 0; coreunit_bmap != 0; ++i) {
if (coreunit_bmap & mask) {
@@ -20101,6 +20516,8 @@ dhdpcie_sssr_dump(dhd_pub_t *dhd)
uint32 pwrreq_val = 0;
si_t *sih = dhd->bus->sih;
uint core_bmap = 0, coreunit_bmap = 0;
+ uint32 old_max_resmask = 0, min_resmask = 0, val = 0;
+ bool saqm_isup = FALSE;
if (!dhd->sssr_inited) {
DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
@@ -20131,6 +20548,8 @@ dhdpcie_sssr_dump(dhd_pub_t *dhd)
PMU_REG(sih, RsrcState, 0, 0)));
dhdpcie_d11_check_outofreset(dhd);
+ saqm_isup = dhdpcie_saqm_check_outofreset(dhd);
+ DHD_PRINT(("%s: Before WL down, SAQM core up state is %d\n", __FUNCTION__, saqm_isup));
#ifdef DHD_SSSR_DUMP_BEFORE_SR
DHD_PRINT(("%s: Collecting Dump before SR\n", __FUNCTION__));
@@ -20140,57 +20559,80 @@ dhdpcie_sssr_dump(dhd_pub_t *dhd)
}
#endif /* DHD_SSSR_DUMP_BEFORE_SR */
- dhdpcie_clear_intmask_and_timer(dhd);
- dhdpcie_clear_clk_req(dhd);
- powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
- dhdpcie_pcie_send_ltrsleep(dhd);
+ /* Read Min and Max resource mask */
+ dhd_sbreg_op(dhd, dhd->sssr_reg_info->rev5.pmu_regs.base_regs.pmu_max_res_mask,
+ &old_max_resmask, TRUE);
+ dhd_sbreg_op(dhd, dhd->sssr_reg_info->rev5.pmu_regs.base_regs.pmu_min_res_mask,
+ &min_resmask, TRUE);
+ if (dhd->sssr_reg_info->rev2.version >= SSSR_REG_INFO_VER_5) {
+ dhdpcie_arm_clear_clk_req(dhd);
+ dhdpcie_saqm_clear_clk_req(dhd);
+ dhdpcie_pcie_send_ltrsleep(dhd);
+ /* MaxRsrcMask is updated to bring down the resources for rev5 and above */
+ val = dhd->sssr_reg_info->rev5.pmu_regs.base_regs.sssr_max_res_mask | min_resmask;
+ dhd_sbreg_op(dhd, dhd->sssr_reg_info->rev5.pmu_regs.base_regs.pmu_max_res_mask,
+ &val, FALSE);
+ /* Wait for some time before Restore */
+ OSL_DELAY(100 * 1000);
+ } else {
+ dhdpcie_clear_intmask_and_timer(dhd);
+ dhdpcie_clear_clk_req(dhd);
+ powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
+ dhdpcie_pcie_send_ltrsleep(dhd);
- /* save current pwr req state and clear pwr req for all domains */
- pwrreq_val = si_srpwr_request(sih, 0, 0);
- pwrreq_val >>= SRPWR_REQON_SHIFT;
- pwrreq_val &= SRPWR_DMN_ALL_MASK(sih);
- DHD_PRINT(("%s: clear pwr req all domains\n", __FUNCTION__));
- si_srpwr_request(sih, SRPWR_DMN_ALL_MASK(sih), 0);
+ /* save current pwr req state and clear pwr req for all domains */
+ pwrreq_val = si_srpwr_request(sih, 0, 0);
+ pwrreq_val >>= SRPWR_REQON_SHIFT;
+ pwrreq_val &= SRPWR_DMN_ALL_MASK(sih);
+ DHD_PRINT(("%s: clear pwr req all domains\n", __FUNCTION__));
+ si_srpwr_request(sih, SRPWR_DMN_ALL_MASK(sih), 0);
- if (MULTIBP_ENAB(sih)) {
- dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, CC_REG_OFF(PowerControl), FALSE);
+ if (MULTIBP_ENAB(sih)) {
+ dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, CC_REG_OFF(PowerControl), FALSE);
+ }
+ /* Wait for some time before Restore */
+ OSL_DELAY(10000);
}
-
- /* Wait for some time before Restore */
- OSL_DELAY(10000);
pwrctrl = si_corereg(sih, 0, CC_REG_OFF(PowerControl), 0, 0);
DHD_PRINT(("%s: After WL down (powerctl: pcie:0x%x chipc:0x%x) "
- "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
+ "PMU rctl:0x%x res_state:0x%x old_max_resmask:0x%x min_resmask:0x%x "
+ "sssr_max_res_mask:0x%x max_resmask:0x%x\n", __FUNCTION__,
si_corereg(sih, sih->buscoreidx, CC_REG_OFF(PowerControl), 0, 0),
pwrctrl, PMU_REG(sih, RetentionControl, 0, 0),
- PMU_REG(sih, RsrcState, 0, 0)));
+ PMU_REG(sih, RsrcState, 0, 0), old_max_resmask, min_resmask,
+ dhd->sssr_reg_info->rev5.pmu_regs.base_regs.sssr_max_res_mask,
+ PMU_REG(sih, MaxResourceMask, 0, 0)));
+ if (dhd->sssr_reg_info->rev2.version >= SSSR_REG_INFO_VER_5) {
+ dhd_sbreg_op(dhd, dhd->sssr_reg_info->rev5.pmu_regs.base_regs.pmu_max_res_mask,
+ &old_max_resmask, FALSE);
+ }
if (MULTIBP_ENAB(sih)) {
if ((pwrctrl >> SRPWR_STATUS_SHIFT) & SRPWR_DMN1_ARMBPSD_MASK) {
DHD_ERROR(("DIG Domain is not going down. The DIG SSSR is not valid.\n"));
}
- if ((pwrctrl >> SRPWR_STATUS_SHIFT) & SRPWR_DMN2_MACAUX) {
+ if ((pwrctrl >> SRPWR_STATUS_SHIFT) & SRPWR_DMN2_MACAUX_MASK) {
DHD_ERROR(("MAC AUX Domain is not going down.\n"));
core_bmap |= OOBR_DMP_FOR_D11;
coreunit_bmap |= OOBR_DMP_D11_AUX;
}
- if ((pwrctrl >> SRPWR_STATUS_SHIFT) & SRPWR_DMN3_MACMAIN) {
- DHD_ERROR(("MAC MAIN Domain is not going down.\n"));
+ if ((pwrctrl >> SRPWR_STATUS_SHIFT) & SRPWR_DMN3_MACMAIN_MASK) {
+			DHD_ERROR(("MAC MAIN Domain is not going down.\n"));
core_bmap |= OOBR_DMP_FOR_D11;
coreunit_bmap |= OOBR_DMP_D11_MAIN;
}
- if ((pwrctrl >> SRPWR_STATUS_SHIFT) & SRPWR_DMN4_MACSCAN) {
+ if ((pwrctrl >> SRPWR_STATUS_SHIFT) & SRPWR_DMN4_MACSCAN_MASK) {
DHD_ERROR(("MAC SCAN Domain is not going down.\n"));
core_bmap |= OOBR_DMP_FOR_D11;
coreunit_bmap |= OOBR_DMP_D11_SCAN;
}
- if ((pwrctrl >> SRPWR_STATUS_SHIFT) & SRPWR_DMN6_SAQM) {
+ if ((pwrctrl >> SRPWR_STATUS_SHIFT) & SRPWR_DMN6_SAQM_MASK) {
DHD_ERROR(("SAQM Domain is not going down.\n"));
core_bmap |= OOBR_DMP_FOR_SAQM;
}
@@ -20204,36 +20646,51 @@ dhdpcie_sssr_dump(dhd_pub_t *dhd)
OSL_DELAY(15000);
DHD_PRINT(("%s: After WL up again (powerctl: pcie:0x%x chipc:0x%x) "
- "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
+ "PMU rctl:0x%x res_state:0x%x old_max_resmask:0x%x "
+ "min_resmask:0x%x sssr_max_res_mask:0x%x "
+ "max_resmask:0x%x\n", __FUNCTION__,
si_corereg(sih, sih->buscoreidx,
CC_REG_OFF(PowerControl), 0, 0),
si_corereg(sih, 0, CC_REG_OFF(PowerControl), 0, 0),
PMU_REG(sih, RetentionControl, 0, 0),
- PMU_REG(sih, RsrcState, 0, 0)));
+ PMU_REG(sih, RsrcState, 0, 0), old_max_resmask, min_resmask,
+ dhd->sssr_reg_info->rev5.pmu_regs.base_regs.sssr_max_res_mask,
+ PMU_REG(sih, MaxResourceMask, 0, 0)));
}
dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
dhdpcie_arm_resume_clk_req(dhd);
- /* Before collecting SSSR dump explicitly request power
- * for main and aux domains as per recommendation
- * of ASIC team
- */
- si_srpwr_request(sih, SRPWR_DMN_ALL_MASK(sih), SRPWR_DMN_ALL_MASK(sih));
+ if (dhd->sssr_reg_info->rev2.version <= SSSR_REG_INFO_VER_4) {
+ /* Before collecting SSSR dump explicitly request power
+ * for main and aux domains as per recommendation
+ * of ASIC team
+ */
+ si_srpwr_request(sih, SRPWR_DMN_ALL_MASK(sih), SRPWR_DMN_ALL_MASK(sih));
+ }
- if (dhd->sssr_reg_info->rev2.version >= SSSR_REG_INFO_VER_4) {
+ if (dhd->sssr_reg_info->rev2.version == SSSR_REG_INFO_VER_4) {
dhdpcie_bring_saqm_updown(dhd, TRUE);
+ } else if (dhd->sssr_reg_info->rev2.version == SSSR_REG_INFO_VER_5) {
+ dhdpcie_bring_saqm_updown(dhd, FALSE);
}
dhdpcie_bring_d11_outofreset(dhd);
- if (dhd->sssr_reg_info->rev2.version >= SSSR_REG_INFO_VER_4) {
+ if (dhd->sssr_reg_info->rev2.version == SSSR_REG_INFO_VER_4) {
dhdpcie_bring_saqm_updown(dhd, FALSE);
}
/* Add delay for d11 cores out of reset */
OSL_DELAY(6000);
+ saqm_isup = dhdpcie_saqm_check_outofreset(dhd);
+ DHD_PRINT(("%s: After WL UP and out of reset, SAQM core up state is %d\n",
+ __FUNCTION__, saqm_isup));
+ if (saqm_isup && (dhd->sssr_reg_info->rev2.version >= SSSR_REG_INFO_VER_5)) {
+ dhdpcie_saqm_clear_force_sr_all(dhd);
+ }
+
DHD_PRINT(("%s: Collecting Dump after SR\n", __FUNCTION__));
if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
diff --git a/dhd_pcie_linux.c b/dhd_pcie_linux.c
index 5143064..7faff68 100644
--- a/dhd_pcie_linux.c
+++ b/dhd_pcie_linux.c
@@ -87,9 +87,9 @@
#include <dhd_plat.h>
-#if defined(WBRC) && defined(WBRC_TEST)
+#if defined(WBRC)
#include <wb_regon_coordinator.h>
-#endif /* WBRC && WBRC_TEST */
+#endif /* WBRC */
#define PCI_CFG_RETRY 10 /* PR15065: retry count for pci cfg accesses */
#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
@@ -2244,6 +2244,10 @@ int dhdpcie_init(struct pci_dev *pdev)
dhdpcie_smmu_info_t *dhdpcie_smmu_info = NULL;
#endif /* USE_SMMU_ARCH_MSM */
int ret = 0;
+#if defined(WBRC) && defined(BCMDHD_MODULAR)
+ int wbrc_ret = 0;
+ uint16 chipid = 0;
+#endif /* WBRC && BCMDHD_MODULAR */
do {
/* osl attach */
@@ -2450,6 +2454,12 @@ int dhdpcie_init(struct pci_dev *pdev)
bus->dhd->mac.octet[2] = 0x4C;
}
+#if defined(WBRC) && defined(BCMDHD_MODULAR)
+ wbrc_ret = wbrc_init();
+ chipid = dhd_get_chipid(bus);
+ BCM_REFERENCE(chipid);
+#endif /* WBRC && BCMDHD_MODULAR */
+
/* Attach to the OS network interface */
DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
if (dhd_attach_net(bus->dhd, TRUE)) {
@@ -2457,6 +2467,16 @@ int dhdpcie_init(struct pci_dev *pdev)
break;
}
+#if defined(WBRC) && defined(BCMDHD_MODULAR)
+ if (!wbrc_ret) {
+#ifdef WBRC_HW_QUIRKS
+ wl2wbrc_wlan_init(bus->dhd, chipid);
+#else
+ wl2wbrc_wlan_init(bus->dhd);
+#endif /* WBRC_HW_QUIRKS */
+ }
+#endif /* WBRC && BCMDHD_MODULAR */
+
dhdpcie_init_succeeded = TRUE;
#if defined(CONFIG_ARCH_MSM) && defined(CONFIG_SEC_PCIE_L1SS)
sec_pcie_set_use_ep_loaded(bus->rc_dev);
diff --git a/dhd_proto.h b/dhd_proto.h
index 579ed30..99786af 100644
--- a/dhd_proto.h
+++ b/dhd_proto.h
@@ -226,8 +226,10 @@ extern void dhd_lb_rx_process_handler(unsigned long data);
extern int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data);
#ifdef BCMPCIE
+#ifdef DHD_TIMESYNC
extern int dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlv, uint16 tlv_len,
uint16 seq, uint16 xt_id);
+#endif /* DHD_TIMESYNC */
extern bool dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set);
extern bool dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set);
extern bool dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set);
@@ -235,7 +237,9 @@ extern bool dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set);
extern bool dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set);
extern bool dhd_prot_pkt_rts_protect(dhd_pub_t *dhd, bool enable, bool set);
#else /* BCMPCIE */
+#ifdef DHD_TIMESYNC
#define dhd_prot_send_host_timestamp(a, b, c, d, e) 0
+#endif /* DHD_TIMESYNC */
#define dhd_prot_data_path_tx_timestamp_logging(a, b, c) 0
#define dhd_prot_data_path_rx_timestamp_logging(a, b, c) 0
#endif /* BCMPCIE */
diff --git a/dhd_rtt.c b/dhd_rtt.c
index 7a1ff43..d5cad4d 100644
--- a/dhd_rtt.c
+++ b/dhd_rtt.c
@@ -278,6 +278,7 @@ static const int burst_duration_idx[] = {0, 0, 1, 2, 4, 8, 16, 32, 64, 128, 0,
/* ftm status mapping to host status */
static const ftm_status_map_host_entry_t ftm_status_map_info[] = {
+ {WL_PROXD_E_CHANSW, RTT_STATUS_FAIL_CHANSW},
{WL_PROXD_E_INCOMPLETE, RTT_STATUS_FAILURE},
{WL_PROXD_E_OVERRIDDEN, RTT_STATUS_FAILURE},
{WL_PROXD_E_ASAP_FAILED, RTT_STATUS_FAILURE},
diff --git a/dhd_rtt.h b/dhd_rtt.h
index 161c762..fce6522 100644
--- a/dhd_rtt.h
+++ b/dhd_rtt.h
@@ -121,7 +121,8 @@ typedef enum rtt_reason {
RTT_STATUS_INVALID_REQ = 13, // bad request args
RTT_STATUS_NO_WIFI = 14, // WiFi not enabled Responder overrides param info
// cannot range with new params
- RTT_STATUS_FAIL_FTM_PARAM_OVERRIDE = 15
+ RTT_STATUS_FAIL_FTM_PARAM_OVERRIDE = 15,
+ RTT_STATUS_FAIL_CHANSW = 16 // failed due to channel switch
} rtt_reason_t;
enum {
diff --git a/include/802.11.h b/include/802.11.h
index a7b4456..6dffe86 100644
--- a/include/802.11.h
+++ b/include/802.11.h
@@ -1632,8 +1632,13 @@ enum dot11_tag_ids {
#define EXT_MNG_AKM_SUITE_SELECTOR_ID 114u /* AKM Suite Selector */
#define DOT11_MNG_AKM_SUITE_SELECTOR_ID (DOT11_MNG_ID_EXT_ID + \
EXT_MNG_AKM_SUITE_SELECTOR_ID)
+/* Draft 802.11be D3.0 Table 9-128 Element IDs */
+#define EXT_MNG_MLO_LINK_INFO_ID 133u /* MLO Link Information */
+#define DOT11_MNG_MLO_LINK_INFO_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_MLO_LINK_INFO_ID)
#define EXT_MNG_AID_BITMAP_ID 134u /* AID Bitmap */
#define DOT11_MNG_AID_BITMAP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_AID_BITMAP_ID)
+#define EXT_MNG_BW_IND_ID 135u /* Bandwidth Indication */
+#define DOT11_MNG_BW_IND_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_BW_IND_ID)
/* deprecated definitions, do not use, to be deleted later */
#define FILS_HLP_CONTAINER_EXT_ID FILS_EXTID_MNG_HLP_CONTAINER_ID
diff --git a/include/bcmdefs.h b/include/bcmdefs.h
index 49a584f..578987e 100644
--- a/include/bcmdefs.h
+++ b/include/bcmdefs.h
@@ -877,6 +877,19 @@ extern uint32 gFWID;
#define URB_ENAB() (FALSE)
#endif /* URB */
+#ifdef UDCC /* UDCC support enab macros */
+ extern bool _udcc_enab;
+#if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define UDCC_ENAB() (_udcc_enab)
+#elif defined(UDCC_DISABLED)
+ #define UDCC_ENAB() (FALSE)
+#else
+ #define UDCC_ENAB() (TRUE)
+#endif
+#else
+ #define UDCC_ENAB() (FALSE)
+#endif /* UDCC */
+
#ifdef TX_HISTOGRAM
extern bool _tx_histogram_enabled;
#if defined(ROM_ENAB_RUNTIME_CHECK)
diff --git a/include/bcmerror.h b/include/bcmerror.h
index d889313..b709630 100644
--- a/include/bcmerror.h
+++ b/include/bcmerror.h
@@ -241,7 +241,9 @@ typedef int bcmerror_t;
/* FTM error codes [-1024, -2047] */
enum {
- WL_FTM_E_LAST = -1092,
+ WL_FTM_E_LAST = -1094,
+ WL_FTM_E_SUPPRESS = -1094,
+ WL_FTM_E_NO_CSI_DATA = -1093,
WL_FTM_E_PHY_CSI_FATAL_ERR = -1092,
WL_FTM_E_FORCE_DELETED = -1091,
WL_FTM_E_ONE_WAY_RTT = -1090,
@@ -319,7 +321,8 @@ typedef int32 wl_ftm_status_t;
#ifdef BCMUTILS_ERR_CODES
/* begin proxd codes compatible w/ ftm above - obsolete DO NOT extend */
enum {
- WL_PROXD_E_LAST = -1058,
+ WL_PROXD_E_LAST = -1059,
+ WL_PROXD_E_CHANSW = -1059,
WL_PROXD_E_PKTFREED = -1058,
WL_PROXD_E_ASSOC_INPROG = -1057,
WL_PROXD_E_NOAVAIL = -1056,
diff --git a/include/bcmevent.h b/include/bcmevent.h
index 682e6ad..a6b2af9 100644
--- a/include/bcmevent.h
+++ b/include/bcmevent.h
@@ -1082,6 +1082,8 @@ typedef enum wl_nan_events {
WL_NAN_EVENT_OOB_AF_RXTIMEOUT = 54, /* OOB AF rx timeout */
WL_NAN_EVENT_DW_DWELL_BCN_LOST = 55, /* DW Dwell bcn rx fail */
	WL_NAN_EVENT_SUSPENSION_IND	= 56,	/* Suspension Start/Stop status Indication */
+ WL_NAN_EVENT_TETHER_PEER_ADD = 57, /* NAN Tether client added on peer side */
+ WL_NAN_EVENT_TETHER_PEER_DEL = 58, /* NAN Tether client deleted on peer side */
/* keep WL_NAN_EVENT_INVALID as the last element */
WL_NAN_EVENT_INVALID /* delimiter for max value */
} nan_app_events_e;
@@ -1844,4 +1846,13 @@ typedef BWL_PRE_PACKED_STRUCT struct sup_wpa_timing_prop_ie {
} BWL_POST_PACKED_STRUCT sup_wpa_timing_prop_ie_t;
#include <packed_section_end.h>
+
+/* NAN tether event structure.
+ * It is sent to the host encapsulated within xtlv WL_NAN_XTLV_TETHER.
+ */
+typedef struct nan_tether_event_s {
+	uint8 if_idx;		/* iface index of NDI supporting tether peer */
+ uint8 num_clients; /* num of end clients below */
+ uint8 end_clients[]; /* SA of end clients: n*ETHER_ADDR_LEN */
+} nan_tether_event_t;
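+/* Illustrative sizing (assumption, derived from the field comments above): the
+ * event payload carried in WL_NAN_XTLV_TETHER is
+ * sizeof(nan_tether_event_t) + num_clients * ETHER_ADDR_LEN bytes.
+ */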
#endif /* _BCMEVENT_H_ */
diff --git a/include/bcmutils.h b/include/bcmutils.h
index c4fd819..fcd9925 100644
--- a/include/bcmutils.h
+++ b/include/bcmutils.h
@@ -902,6 +902,7 @@ xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
/* crc */
uint8 hndcrc8(const uint8 *p, uint nbytes, uint8 crc);
uint16 hndcrc16(const uint8 *p, uint nbytes, uint16 crc);
+uint16 hndcrc16ansi(const uint8 *p, uint nbytes, uint16 crc);
uint32 hndcrc32(const uint8 *p, uint nbytes, uint32 crc);
/* format/print */
@@ -1017,7 +1018,7 @@ typedef struct {
extern void pktlist_add(pktlist_info_t *pktlist, void *p, int len, char *file);
extern void pktlist_remove(pktlist_info_t *pktlist, void *p);
-extern char* pktlist_dump(pktlist_info_t *pktlist, char *buf);
+extern char* pktlist_dump(pktlist_info_t *pktlist, char *buf, uint bufsz);
#ifdef BCMDBG_PTRACE
extern void pktlist_trace(pktlist_info_t *pktlist, void *pkt, uint16 bit);
#endif /* BCMDBG_PTRACE */
diff --git a/include/dnglioctl.h b/include/dnglioctl.h
index aa4679f..2d60499 100644
--- a/include/dnglioctl.h
+++ b/include/dnglioctl.h
@@ -360,108 +360,4 @@ enum dsec_sboot_xtlv_id {
DSEC_OTP_XTLV_SBOOT_LOT_NUM_MS = 29u, /* Chip lot num high bits [17:47] 31 bits */
DSEC_OTP_XTLV_SBOOT_OTP_WR_LOCK_ENAB = 30u, /* OTP write lock enable bit */
};
-
-#define CAPEXT_INFO_VERSION_1 (1u)
-#define CAPEXT_INFO_VERSION CAPEXT_INFO_VERSION_1
-
-/* Top structure of capext reporting. For reporting, feature ids are used as types in XTLVs */
-typedef struct {
- uint16 version; /**< see definition of CAPEXT_INFO_VERSION */
- uint16 datalen; /**< length of data including all paddings. */
- uint8 data []; /**< variable length payload:
- * 1 or more bcm_xtlv_t type of tuples.
- * each tuple is padded to multiple of 4 bytes.
- * 'datalen' field of this structure includes all paddings.
- */
-} capext_info_t;
-
-/* Each feature reported in capext has a feature id. Feature id is a 16-bit value.
- * The feature id namespace is split into 3 partitions. One for BUS, the second for RTE,
- * and the third for WL. All partitions are contiguous and fixed in size
- */
-#define CAPEXT_FEATURE_ID_NUM_PARTITIONS (3u)
-#define CAPEXT_FEATURE_ID_PARTITION_SIZE (1024u)
-/* Feature IDs from 3072 for capext are reserved */
-#define CAPEXT_RSVD_FEATURE_ID_BASE (3072u)
-
-/* Bus partition */
-/* The features listed in the enumeration below have subfeatures.
- * If a new feature is added/updated and that feature has sub-features that need to be reported,
- * add that feature here
- */
-#define CAPEXT_BUS_FEATURE_ID_BASE (0)
-enum capext_bus_feature_ids {
- CAPEXT_BUS_FEATURE_RSVD = (CAPEXT_BUS_FEATURE_ID_BASE + 0),
- /* BUS top level feature id to hold and report bitmaps of features with and
- * without sub-features.
- */
- CAPEXT_BUS_FEATURE_BUS_FEATURES = (CAPEXT_BUS_FEATURE_ID_BASE + 1),
- /* BUS feature ids below hold and report sub-feature bitmaps of some features
- * mentioned in top level feature id bitmap
- */
- CAPEXT_BUS_FEATURE_PKTLAT = (CAPEXT_BUS_FEATURE_ID_BASE + 2),
- CAPEXT_BUS_FEATURE_MAX
-};
-
-/* BUS features bit positions in top level rte feature id. Features mentioned below are reported */
-enum capext_bus_feature_bitpos {
- CAPEXT_BUS_FEATURE_BITPOS_HP2P = 0,
- CAPEXT_BUS_FEATURE_BITPOS_PTM = 1,
- CAPEXT_BUS_FEATURE_BITPOS_PKTLAT = 2,
- CAPEXT_BUS_FEATURE_BITPOS_BUSTPUT = 3, /* feature with sub-features */
- CAPEXT_BUS_FEATURE_BITPOS_MAX
-};
-
-/* Packet latency sub-feature bit positions. These sub-features need to be reported */
-enum capext_pktlat_subfeature_bitpos {
- CAPEXT_PKTLAT_BITPOS_META = 0,
- CAPEXT_PKTLAT_BITPOS_IPC = 1,
- CAPEXT_PKTLAT_BITPOS_MAX
-};
-
-/* RTE partition */
-/* The features listed in the enumeration below have subfeatures.
- * If a new feature is added and that feature has sub-features that need to be reported,
- * add that feature here
- */
-#define CAPEXT_RTE_FEATURE_ID_BASE (1024u)
-enum capext_rte_feature_ids {
- CAPEXT_RTE_FEATURE_RSVD = (CAPEXT_RTE_FEATURE_ID_BASE + 0),
- /* RTE top level feature id to hold and report bitmaps of features with and
- * without sub-features.
- */
- CAPEXT_RTE_FEATURE_RTE_FEATURES = (CAPEXT_RTE_FEATURE_ID_BASE + 1),
- /* RTE feature ids below hold and report sub-feature bitmaps of some features
- * mentioned in top level feature id bitmap
- */
- CAPEXT_RTE_FEATURE_ECOUNTERS = (CAPEXT_RTE_FEATURE_ID_BASE + 2),
- CAPEXT_RTE_FEATURE_MAX
-};
-
-/* Ecounters sub-feature bit positions. These sub-features need to be reported */
-enum capext_ecounters_subfeature_bitpos {
- CAPEXT_ECOUNTERS_BITPOS_TXHIST = 0,
- CAPEXT_ECOUNTERS_BITPOS_ADV = 1,
- CAPEXT_ECOUNTERS_BITPOS_PHY = 2,
- CAPEXT_ECOUNTERS_BITPOS_PHY_CAL = 3,
- CAPEXT_ECOUNTERS_BITPOS_CHSTATS = 4,
- CAPEXT_ECOUNTERS_BITPOS_PEERSTATS = 5,
- CAPEXT_ECOUNTERS_BITPOS_DTIM_MISS = 6,
- CAPEXT_ECOUNTERS_BITPOS_MAX
-};
-
-/* RTE features bit positions in top level rte feature id. Features mentioned below are reported */
-enum capext_rte_feature_bitpos {
- CAPEXT_RTE_FEATURE_BITPOS_H2D_LOG_TIME_SYNC = 0,
- CAPEXT_RTE_FEATURE_BITPOS_HWRNG = 1,
- CAPEXT_RTE_FEATURE_BITPOS_SPMI = 2,
- CAPEXT_RTE_FEATURE_BITPOS_ECOUNTERS = 3, /* feature with sub-features */
- CAPEXT_RTE_FEATURE_BITPOS_EVENT_LOG = 4,
-
- CAPEXT_RTE_FEATURE_BITPOS_LOGTRACE = 5,
- CAPEXT_RTE_FEATURE_BITPOS_HCHK = 6,
- CAPEXT_RTE_FEATURE_BITPOS_SMD = 7,
- CAPEXT_RTE_FEATURE_BITPOS_ETD = 8,
- CAPEXT_RTE_FEATURE_BITPOS_MAX
-};
#endif /* _dngl_ioctl_h_ */
diff --git a/include/epivers.h b/include/epivers.h
index c5012d5..cbefd4a 100644
--- a/include/epivers.h
+++ b/include/epivers.h
@@ -27,27 +27,27 @@
#define EPI_MINOR_VERSION 10
-#define EPI_RC_NUMBER 208
+#define EPI_RC_NUMBER 224
-#define EPI_INCREMENTAL_NUMBER 1
+#define EPI_INCREMENTAL_NUMBER 0
#define EPI_BUILD_NUMBER 0
-#define EPI_VERSION 103, 10, 208, 1
+#define EPI_VERSION 103, 10, 224, 0
-#define EPI_VERSION_NUM 0x670ad001
+#define EPI_VERSION_NUM 0x670ae000
-#define EPI_VERSION_DEV 103.10.208
+#define EPI_VERSION_DEV 103.10.224
/* Driver Version String, ASCII, 32 chars max */
#if defined (WLTEST)
-#define EPI_VERSION_STR "103.10.208.1 (wlan=r1018476 WLTEST)"
+#define EPI_VERSION_STR "103.10.224 (wlan=r1022286 WLTEST)"
#elif (defined (BCMDBG_ASSERT) && \
!defined (BCMDBG_ASSERT_DISABLED) && \
!defined (ASSERT_FP_DISABLE))
-#define EPI_VERSION_STR "103.10.208.1 (wlan=r1018476 ASSRT)"
+#define EPI_VERSION_STR "103.10.224 (wlan=r1022286 ASSRT)"
#else
-#define EPI_VERSION_STR "103.10.208.1 (wlan=r1018476)"
+#define EPI_VERSION_STR "103.10.224 (wlan=r1022286)"
#endif /* BCMINTERNAL */
#endif /* _epivers_h_ */
diff --git a/include/event_log_payload.h b/include/event_log_payload.h
index 87098e6..7360d1b 100644
--- a/include/event_log_payload.h
+++ b/include/event_log_payload.h
@@ -727,8 +727,35 @@ typedef enum {
WL_AMPDU_STATS_TYPE_TXHEx1 = 50, /* TX HE rate (Nss = 1) */
WL_AMPDU_STATS_TYPE_TXHEx2 = 51,
WL_AMPDU_STATS_TYPE_TXHEx3 = 52,
- WL_AMPDU_STATS_TYPE_TXHEx4 = 53
+ WL_AMPDU_STATS_TYPE_TXHEx4 = 53,
+
+ WL_AMPDU_STATS_TYPE_RXEHTx1 = 54, /* RX EHT rate (Nss = 1) */
+ WL_AMPDU_STATS_TYPE_RXEHTx2 = 55,
+ WL_AMPDU_STATS_TYPE_RXEHTx3 = 56,
+ WL_AMPDU_STATS_TYPE_RXEHTx4 = 57,
+ WL_AMPDU_STATS_TYPE_TXEHTx1 = 58, /* TX EHT rate (Nss = 1) */
+ WL_AMPDU_STATS_TYPE_TXEHTx2 = 59,
+ WL_AMPDU_STATS_TYPE_TXEHTx3 = 60,
+ WL_AMPDU_STATS_TYPE_TXEHTx4 = 61,
+
+ WL_AMPDU_STATS_TYPE_RX_EHT_SUOK = 62,
+ WL_AMPDU_STATS_TYPE_RX_EHT_SU_DENS = 63,
+ WL_AMPDU_STATS_TYPE_RX_EHT_MUMIMOOK = 64,
+ WL_AMPDU_STATS_TYPE_RX_EHT_MUMIMO_DENS = 65,
+ WL_AMPDU_STATS_TYPE_RX_EHT_DLOFDMA_OK = 66,
+ WL_AMPDU_STATS_TYPE_RX_EHT_DLOFDMA_DENS = 67,
+ WL_AMPDU_STATS_TYPE_RX_EHT_DLOFDMA_HIST = 68,
+
+ WL_AMPDU_STATS_TYPE_TX_EHT_MCSALL = 69,
+ WL_AMPDU_STATS_TYPE_TX_EHT_MCSOK = 70,
+ WL_AMPDU_STATS_TYPE_TX_EHT_MUALL = 71,
+ WL_AMPDU_STATS_TYPE_TX_EHT_MUOK = 72,
+ WL_AMPDU_STATS_TYPE_TX_EHT_RUBW = 73,
+ WL_AMPDU_STATS_TYPE_TX_EHT_EHT_PADDING = 74,
+
+ WL_AMPDU_STATS_TYPE_MLO_LINK_INFO = 75
} wl_ampdu_stat_enum_t;
+
#define WL_AMPDU_STATS_MAX_CNTS	(64)	/* Possible max number of counters in any sub-category */
typedef struct {
@@ -1555,6 +1582,8 @@ typedef struct pciedev_htod_rx_ring_info_v1 {
uint16 htod_rx_buf_pool_item_cnt;
uint16 htod_rx_buf_pool_availcnt;
uint16 htod_rx_buf_pool_pend_item_cnt;
+ uint16 htod_rx_inuse_pool_r_ptr;
+ uint16 htod_rx_inuse_pool_w_ptr;
} pciedev_htod_rx_ring_info_v1_t;
/* WL RX fifo overflow info. Sent in triggered log events container above */
@@ -1592,6 +1621,7 @@ typedef struct wlc_rx_fifo_overflow_info_v1 {
uint64 rx_dma_posts_success_time[WLC_RX_FIFO_DMA_NUM]; /* in ns */
uint32 rx_dma_desc_count[WLC_RX_FIFO_DMA_NUM];
+ uint64 rx_sample_ts; /* Time in ns when sample taken */
} wlc_rx_fifo_overflow_info_v1_t;
/* Data structures for transferring channel switch histogram data to host */
diff --git a/include/event_log_set.h b/include/event_log_set.h
index 74b1f35..9e88900 100644
--- a/include/event_log_set.h
+++ b/include/event_log_set.h
@@ -416,6 +416,16 @@
#define EVENT_LOG_SET_MULTI_LINK_BLOCK_SIZE (EVENT_LOG_BLOCK_SIZE_1K)
#endif
+/* set 41: For all rate/rate selection related logging. */
+#define EVENT_LOG_SET_RATE_LOG (41u)
+#ifndef EVENT_LOG_SET_RATE_LOG_NUM_BLOCKS
+#define EVENT_LOG_SET_RATE_LOG_NUM_BLOCKS (2u)
+#endif
+
+#ifndef EVENT_LOG_SET_RATE_LOG_BLOCK_SIZE
+#define EVENT_LOG_SET_RATE_LOG_BLOCK_SIZE (EVENT_LOG_BLOCK_SIZE_1K)
+#endif
+
#ifndef NUM_EVENT_LOG_SETS
/* Set a maximum number of sets here. It is not dynamic for
* efficiency of the EVENT_LOG calls. Old branches could define
@@ -424,9 +434,9 @@
*/
#ifdef NUM_EVENT_LOG_SETS_V2
/* for v2, everything has became unsigned */
-#define NUM_EVENT_LOG_SETS (41u)
+#define NUM_EVENT_LOG_SETS (42u)
#else /* NUM_EVENT_LOG_SETS_V2 */
-#define NUM_EVENT_LOG_SETS (41)
+#define NUM_EVENT_LOG_SETS (42)
#endif /* NUM_EVENT_LOG_SETS_V2 */
#endif /* NUM_EVENT_LOG_SETS */
diff --git a/include/event_log_tag.h b/include/event_log_tag.h
index 343a847..0fb6bb2 100644
--- a/include/event_log_tag.h
+++ b/include/event_log_tag.h
@@ -616,8 +616,23 @@
/* Additional RRM logs for 802.11k/v/r */
#define EVENT_LOG_TAG_RRM_11KVR_RPT 466
+/* Tx power mitigation */
+#define EVENT_LOG_TAG_TXPWR_MITIGATION 467
+
+#define EVENT_LOG_TAG_PHY_AZ_INFO_BASIC 468
+#define EVENT_LOG_TAG_PHY_AZ_INFO_MIMO 469
+#define EVENT_LOG_TAG_PHY_AZ_INFO_PT 470
+#define EVENT_LOG_TAG_PHY_AZ_INFO_OUTLIER 471
+
+/* Rate and rate selection (for both TX/RX) tags */
+#define EVENT_LOG_TAG_RATE_ERROR 472
+#define EVENT_LOG_TAG_RATE 473
+#define EVENT_LOG_TAG_RATE_INFO 474
+#define EVENT_LOG_TAG_RATE_TRACE 475
+
+
/* EVENT_LOG_TAG_MAX = Set to the same value of last tag, not last tag + 1 */
-#define EVENT_LOG_TAG_MAX 466
+#define EVENT_LOG_TAG_MAX 475
typedef enum wl_el_set_type_def {
EVENT_LOG_SET_TYPE_DEFAULT = 0, /* flush the log buffer when it is full - Default option */
diff --git a/include/hndoobr.h b/include/hndoobr.h
index d4eca08..1e532a7 100644
--- a/include/hndoobr.h
+++ b/include/hndoobr.h
@@ -86,8 +86,9 @@ typedef volatile struct hndoobr_percore_reg {
#define OOBR_PERCORE_CORENCONFIG_INTOUTPUTS_SHIFT 8u
typedef volatile struct hndoobr_reg {
- uint32 capability; /* 0x00 */
- uint32 reserved[3];
+ uint32 capability; /* 0x00 - 0x03 */
+ uint32 capability2; /* 0x04 - 0x07 */
+ uint32 reserved[2];
uint32 intstatus[4]; /* 0x10 - 0x1c */
uint32 reserved1[4]; /* 0x20 - 0x2c */
uint32 topintdestsel[4]; /* 0x30 - 0x3c */
diff --git a/include/linux_pkt.h b/include/linux_pkt.h
index 782b0cf..71a5554 100644
--- a/include/linux_pkt.h
+++ b/include/linux_pkt.h
@@ -57,12 +57,12 @@
#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
#endif /* BCM_OBJECT_TRACE */
#endif /* BCMDBG_CTRACE */
-#define PKTLIST_DUMP(osh, buf) BCM_REFERENCE(osh)
+#define PKTLIST_DUMP(osh, buf, bufsz) BCM_REFERENCE(osh)
#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
#else /* BCMDBG_PKT pkt logging for debugging */
#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FILE__)
#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__)
-#define PKTLIST_DUMP(osh, buf) osl_pktlist_dump(osh, buf)
+#define PKTLIST_DUMP(osh, buf, bufsz) osl_pktlist_dump(osh, buf, bufsz)
#define BCMDBG_PTRACE
#define PKTLIST_IDX(skb) ((uint16 *)((char *)PKTTAG(skb) + \
sizeof(((struct sk_buff*)(skb))->cb) - sizeof(uint16)))
@@ -268,7 +268,7 @@ extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
extern void osl_pktlist_add(osl_t *osh, void *p, int line, char *file);
extern void osl_pktlist_remove(osl_t *osh, void *p);
-extern char *osl_pktlist_dump(osl_t *osh, char *buf);
+extern char *osl_pktlist_dump(osl_t *osh, char *buf, uint bufsz);
#ifdef BCMDBG_PTRACE
extern void osl_pkttrace(osl_t *osh, void *pkt, uint16 bit);
#endif /* BCMDBG_PTRACE */
@@ -340,7 +340,7 @@ extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FILE__)
#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__)
#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative((osh), (skb), __LINE__, __FILE__)
-#define PKTLIST_DUMP(osh, buf) osl_pktlist_dump(osh, buf)
+#define PKTLIST_DUMP(osh, buf, bufsz) osl_pktlist_dump(osh, buf, bufsz)
#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
#else /* BCMDBG_PKT */
#ifdef BCMDBG_CTRACE
@@ -357,7 +357,7 @@ extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
#endif /* BCM_OBJECT_TRACE */
#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative((osh), (skb))
#endif /* BCMDBG_CTRACE */
-#define PKTLIST_DUMP(osh, buf) ({BCM_REFERENCE(osh); BCM_REFERENCE(buf);})
+#define PKTLIST_DUMP(osh, buf, bufsz) ({BCM_REFERENCE(osh); BCM_REFERENCE(buf);})
#define PKTDBG_TRACE(osh, pkt, bit) ({BCM_REFERENCE(osh); BCM_REFERENCE(pkt);})
#endif /* BCMDBG_PKT */
#if defined(BCM_OBJECT_TRACE)
@@ -427,7 +427,7 @@ extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
extern bool osl_pktshared(void *skb);
#ifdef BCMDBG_PKT /* pkt logging for debugging */
-extern char *osl_pktlist_dump(osl_t *osh, char *buf);
+extern char *osl_pktlist_dump(osl_t *osh, char *buf, uint bufsz);
extern void osl_pktlist_add(osl_t *osh, void *p, int line, char *file);
extern void osl_pktlist_remove(osl_t *osh, void *p);
#endif /* BCMDBG_PKT */
diff --git a/include/nan.h b/include/nan.h
index 48c8a5f..24a4def 100644
--- a/include/nan.h
+++ b/include/nan.h
@@ -285,6 +285,17 @@ typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_descriptor_attr_s {
/* Optional fields follow */
} BWL_POST_PACKED_STRUCT wifi_nan_svc_descriptor_attr_t;
+#define NAN_SVC_INFO_TYPE_RSVD 0u
+#define NAN_SVC_INFO_TYPE_BONJOUR 1u
+#define NAN_SVC_INFO_TYPE_GENERIC 2u
+
+/* Service info field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_info_field_s {
+ uint8 oui[DOT11_OUI_LEN]; /* 0x50-6F-9A */
+ uint8 svc_protocol;
+ uint8 svc_spec_info[];
+} BWL_POST_PACKED_STRUCT wifi_nan_svc_info_field_t;
+
/* IBSS attribute */
typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ibss_attr_s {
/* Attribute ID - 0x07. */
diff --git a/include/phy_event_log_payload.h b/include/phy_event_log_payload.h
index 0105670..6746aa4 100644
--- a/include/phy_event_log_payload.h
+++ b/include/phy_event_log_payload.h
@@ -1026,6 +1026,9 @@ typedef struct phy_periodic_counters_v255 {
uint32 rxbeaconobss; /* number of OBSS beacons received */
uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */
uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxauth; /* Number of RX AUTH */
+ uint32 rxdeauth; /* Number of RX DEAUTH */
+ uint32 rxassocrsp; /* Number of RX ASSOC response */
/* TX related */
uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS,
@@ -1044,6 +1047,9 @@ typedef struct phy_periodic_counters_v255 {
uint32 txucast; /* number of unicast tx expecting response
* other than cts/cwcts
*/
+ uint32 txauth; /* Number of TX AUTH */
+ uint32 txdeauth; /* Number of TX DEAUTH */
+ uint32 txassocreq; /* Number of TX ASSOC request */
/* TX error related */
uint32 txrtsfail; /* RTS TX failure count */
@@ -1104,26 +1110,30 @@ typedef struct phy_periodic_counters_v255 {
uint32 p2ptbttmiss; /* TBTT coming when the radio is on an off channel */
uint32 noise_iqest_to; /* Count of IQ Est timeout during noise measurement */
uint32 ctmode_ufc_cnt; /* Underflow cnt in ctmode */
-
+ uint32 fourwayfail; /* FourWayHandshakeFailures */
uint16 missbcn_dbg; /* Number of beacon missed to receive */
uint8 sc_dccal_incc_cnt; /* scan dccal counter */
uint8 sc_rxiqcal_skip_cnt; /* scan rxiqcal counter */
uint8 sc_noisecal_incc_cnt; /* scan noise cal counter */
- uint8 debug_01; /* Misc general purpose debug counters */
- uint8 debug_02; /* Misc general purpose debug counters */
- uint8 debug_03; /* Misc general purpose debug counters */
+ /* BFR path */
+ uint8 bfr_last_snd_success; /* Last sounding successful val */
+ uint16 bfr_txndpa; /* null data packet announcements */
+ uint16 bfr_txndp; /* null data packets */
+ uint16 bfr_rxsf;
+ /* BFE path */
+ uint16 bfe_rxndpa_u; /* unicast NDPAs */
+ uint16 bfe_rxndpa_m; /* multicast NDPAs */
+ uint16 bfe_rpt; /* beamforming reports */
+ uint16 bfe_txsf; /* subframes */
+ /* Tx duty cycle shmems */
+ uint16 txduty_ratio_ofdm;
+ uint16 txduty_ratio_cck;
/* Misc general purpose debug counters (will be used for future debugging) */
- uint32 debug_04;
- uint32 debug_05;
- uint32 debug_06;
- uint32 debug_07;
- uint16 debug_08;
- uint16 debug_09;
- uint16 debug_10;
- uint16 debug_11;
+ uint16 debug_01;
+ uint16 debug_02;
} phy_periodic_counters_v255_t;
typedef struct phycal_log_cmn {
@@ -2819,7 +2829,7 @@ typedef struct phy_periodic_log_cmn_v255 {
	uint16 femtemp_read_fail_counter;	/* Fem temperature read fail counter */
uint16 phy_log_counter;
uint16 noise_mmt_overdue; /* Count up if ucode noise mmt is overdue for 5 sec */
- uint16 chan_switch_tm; /* Channel switch time */
+	uint16 chan_switch_dur;		/* Channel switch duration */
uint16 dcc_hcfail; /* dcc health check failure count */
uint16 dcc_calfail; /* dcc failure count */
@@ -2832,13 +2842,6 @@ typedef struct phy_periodic_log_cmn_v255 {
uint16 counter_noise_interrupt_cleared; /* interrupt cleared on channel change */
uint16 counter_noise_cal_cancelled; /* trigger cancelled on channel change */
- /* Misc general purpose debug counters (will be used for future debugging) */
- uint16 debug_01;
- uint16 debug_02;
- uint16 debug_03;
- uint16 debug_04;
- uint16 debug_05;
-
uint16 macsusp_cnt; /* mac suspend counter */
	uint8 amtbitmap;			/* AMT status bitmap */
@@ -2879,7 +2882,8 @@ typedef struct phy_periodic_log_cmn_v255 {
bool phycal_disable; /* Set if calibration is disabled */
bool hwpwrctrlen; /* tx hwpwrctrl enable */
uint8 ocl_en_status; /* OCL requested state and OCL HW state */
-
+ uint16 timeoutstatus;
+ uint32 measurehold; /* PHY hold activities */
uint32 ed_duration; /* ccastats: ed_duration */
uint16 ed_crs_status; /* Status of ED and CRS during noise cal */
uint16 preempt_status1; /* status of preemption */
@@ -2887,8 +2891,6 @@ typedef struct phy_periodic_log_cmn_v255 {
uint16 preempt_status3; /* status of preemption */
uint16 preempt_status4; /* status of preemption */
- uint16 timeoutstatus;
- uint32 measurehold; /* PHY hold activities */
uint16 pktprocdebug;
uint16 pktprocdebug2;
@@ -2922,17 +2924,26 @@ typedef struct phy_periodic_log_cmn_v255 {
uint16 gci_rst_rmac_rx;
uint16 gci_rst_tx_rx;
+ uint16 rspfrm_ed_txncl_cnt; /* Response frame not sent due to ED */
+
+ uint8 rfem_rxmode_curr_hwstate;
+ uint8 rfem_rxmode_bands_req; /* mode as requested by SW layer */
+ uint8 rfem_rxmode_bands_applied; /* mode currently configured in HW */
+
+ uint8 lpc_status; /* Flag to enable/disable LPC, and runtime flag status */
+
uint32 rxsense_disable_req_ch; /* channel disable requests */
uint32 ocl_disable_reqs; /* OCL disable bitmap */
- uint32 interference_mode; /* interference mitigation mode */
- uint32 power_mode; /* LP/VLP logging */
-
- uint16 rspfrm_ed_txncl_cnt; /* Response frame not sent due to ED */
int16 last_cal_temp;
uint8 cal_reason; /* reason for the cal */
uint8 cal_suppressed_cntr_ed; /* counter including ss, mp cals, MSB is current state */
- uint8 lpc_status; /* Flag to enable/disable LPC, and runtime flag status */
+ int8 rxsense_noise_idx; /* rxsense detection threshold desense index */
+ int8 rxsense_offset; /* rxsense min power desense index */
+
+ uint8 rccal_lpf_tmout;
+ uint8 rccal_tia_tmout;
+ uint8 rccal_rxpll_tmout;
uint8 noise_cal_mode; /* noisecal mode */
uint16 noise_cal_timeout; /* noisecal timeout */
@@ -2941,25 +2952,20 @@ typedef struct phy_periodic_log_cmn_v255 {
uint16 channel_active; /* Channel active status */
- int8 rxsense_noise_idx; /* rxsense detection threshold desense index */
- int8 rxsense_offset; /* rxsense min power desense index */
- uint8 phylog_noise_mode; /* Noise mode used */
- uint8 rccal_lpf_tmout;
- uint8 rccal_tia_tmout;
- uint8 rccal_rxpll_tmout;
+ uint32 interference_mode; /* interference mitigation mode */
+ uint32 power_mode; /* LP/VLP logging */
- uint8 rfem_rxmode_curr_hwstate;
- uint8 rfem_rxmode_bands_req; /* mode as requested by SW layer */
- uint8 rfem_rxmode_bands_applied; /* mode currently configured in HW */
+ uint32 temp_sense_cnt;
+ uint16 ncap_misc;
+
+ uint16 nap_disable_reqs; /* NAP disable bitmap */
+ uint8 nap_en_status; /* NAP enable status */
+ uint8 phylog_noise_mode; /* Noise mode used */
/* Misc general purpose debug counters (will be used for future debugging) */
- uint8 debug_06;
- uint8 debug_07;
- uint8 debug_08;
- uint32 debug_09;
- uint32 debug_10;
- uint32 debug_11;
- uint32 debug_12;
+ uint8 debug_01;
+ uint8 debug_02;
+ uint16 debug_03;
} phy_periodic_log_cmn_v255_t;
typedef struct phy_periodic_log_core {
@@ -3243,12 +3249,6 @@ typedef struct phy_periodic_log_core_v255 {
int16 psb; /* psb read during dccal health check */
int16 txcap; /* Txcap value */
- /* Misc general purpose debug counters (will be used for future debugging) */
- uint16 debug_01;
- uint16 debug_02;
- uint16 debug_03;
- uint16 debug_04;
-
uint8 pktproc; /* pktproc read during dccal health check */
uint8 baseindxval; /* TPC Base index */
int8 tgt_pwr; /* Programmed Target power */
@@ -3261,11 +3261,13 @@ typedef struct phy_periodic_log_core_v255 {
int8 noise_level_inst; /* instantaneous noise cal pwr */
int8 estpwr; /* tx powerDet value */
int8 crsmin_th_idx; /* idx used to lookup crs min thresholds */
+
int8 ed_threshold; /* ed threshold */
uint16 ed20_crs; /* ED-CRS status */
uint16 curr_tssival; /* TxPwrCtrlInit_path[01].TSSIVal */
uint16 pwridx_init; /* TxPwrCtrlInit_path[01].pwrIndex_init_path[01] */
+
uint16 auxphystats;
uint16 phystatsgaininfo;
uint16 flexpwrAFE;
@@ -3276,15 +3278,15 @@ typedef struct phy_periodic_log_core_v255 {
uint16 flexgaininfo_A;
uint16 bad_txbaseidx_cnt; /* cntr for tx_baseidx=127 in healthcheck */
+ uint32 rfseq_rst_ctr; /* rfseq reset counter */
uint16 tpc_vmid;
-
- uint16 debug_05; /* multipurpose debug register */
uint8 tpc_av;
/* Misc general purpose debug counters (will be used for future debugging) */
- uint8 debug_06;
- uint8 debug_07;
- uint8 debug_08;
+ uint8 debug_01;
+ uint32 debug_02;
+ uint16 debug_03;
+ uint16 debug_04;
int8 phy_noise_pwr_array[PHY_NOISE_PWR_ARRAY_SIZE]; /* noise buffer array */
} phy_periodic_log_core_v255_t;
@@ -3439,17 +3441,6 @@ typedef struct wlc_btc_shared_stats_v255 {
uint16 bt5g_switch_fail_cnt; /* BT 5G Coex Switch Fail Cnt */
uint16 bt5g_no_defer_cnt; /* BT 5G Coex No Defer Count */
uint16 bt5g_switch_reason_bm; /* BT 5G Switch Reason Bitmap */
-
- /* Misc general purpose debug counters (will be used for future debugging) */
- uint8 debug_01;
- uint16 debug_02;
- uint16 debug_03;
- uint16 debug_04;
- uint16 debug_05;
- uint8 debug_06;
- uint8 debug_07;
- uint8 debug_08;
- uint8 debug_09;
} wlc_btc_shared_stats_v255_t;
/* BTCX Statistics for PHY Logging */
@@ -3906,23 +3897,23 @@ typedef struct phy_periodic_scca_stats_v4 {
/* SmartCCA related PHY Logging */
typedef struct phy_periodic_scca_stats_v255 {
- uint32 asym_intf_ncal_time;
- uint32 asym_intf_host_req_mit_turnon_time;
- int32 asym_intf_ed_thresh;
+ uint32 asym_intf_ncal_time;
+ uint32 asym_intf_host_req_mit_turnon_time;
+ int32 asym_intf_ed_thresh;
- uint16 crsminpoweru0; // crsmin thresh
- uint16 crsminpoweroffset0; // ac_offset core0
- uint16 crsminpoweroffset1; // ac_offset core1
- uint16 ed_crsEn; // phyreg(ed_crsEn)
- uint16 nvcfg0; // LLR deweighting coefficient
- uint16 SlnaRxMaskCtrl0;
- uint16 SlnaRxMaskCtrl1;
- uint16 CRSMiscellaneousParam;
- uint16 AntDivConfig2059;
- uint16 HPFBWovrdigictrl;
- uint16 save_SlnaRxMaskCtrl0;
- uint16 save_SlnaRxMaskCtrl1;
- uint16 asym_intf_ncal_req_chspec; /* channel request noisecal */
+ uint16 crsminpoweru0; /* crsmin thresh */
+ uint16 crsminpoweroffset0; /* ac_offset core0 */
+ uint16 crsminpoweroffset1; /* ac_offset core1 */
+ uint16 ed_crsEn; /* phyreg(ed_crsEn) */
+ uint16 nvcfg0; /* LLR deweighting coefficient */
+ uint16 SlnaRxMaskCtrl0;
+ uint16 SlnaRxMaskCtrl1;
+ uint16 CRSMiscellaneousParam;
+ uint16 AntDivConfig2059;
+ uint16 HPFBWovrdigictrl;
+ uint16 save_SlnaRxMaskCtrl0;
+ uint16 save_SlnaRxMaskCtrl1;
+ uint16 asym_intf_ncal_req_chspec; /* channel request noisecal */
/* asym_intf_stats includes the following bits:
	 * b[0]: bool asym_intf_rx_noise_mit_on;	// SmartCCA Rx mitigation enabled
	 * b[1]: bool asym_intf_tx_smartcca_on;	// SmartCCA Tx mitigation enabled
@@ -3936,27 +3927,33 @@ typedef struct phy_periodic_scca_stats_v255 {
* b[9]: bool asym_intf_host_enable; // Host control related variable
* b[10]: bool asym_intf_pending_host_req; // Set request pending if clk not present
*/
- uint16 asym_intf_stats;
+ uint16 asym_intf_stats;
- uint8 btc_mode; /* from bt_desense in gainovr_shm_config() */
+ uint8 btc_mode; /* from bt_desense in gainovr_shm_config() */
/* noise at antenna from phy_ac_noise_calc() */
- int8 noisecalc_cmplx_pwr_dbm[2];
- int8 asym_intf_ant_noise[2];
+ int8 noisecalc_cmplx_pwr_dbm[2];
+ int8 asym_intf_ant_noise[2];
- uint8 asym_intf_tx_smartcca_cm;
- uint8 asym_intf_rx_noise_mit_cm;
- int8 asym_intf_avg_noise[2];
- int8 asym_intf_latest_noise[2];
+ uint8 asym_intf_tx_smartcca_cm;
+ uint8 asym_intf_rx_noise_mit_cm;
+
+	/* LTE asymmetric jammer parameters */
+ bool asym_intf_jammer_en;
+ uint8 asym_intf_jammer_cm;
+ int8 asym_intf_jammer_pwr[2];
+
+ int8 asym_intf_avg_noise[2];
+ int8 asym_intf_latest_noise[2];
/* used to calculate noise_delta for rx mitigation on/off */
- int8 asym_intf_prev_noise_lvl[2];
- uint8 asym_intf_ant_noise_idx;
- uint8 asym_intf_least_core_idx;
+ int8 asym_intf_prev_noise_lvl[2];
+ uint8 asym_intf_ant_noise_idx;
+ uint8 asym_intf_least_core_idx;
uint8 asym_intf_pending_host_req_type;
/* Set request pending if clk not present */
uint16 asym_intf_ncal_crs_stat;
uint8 asym_intf_ncal_crs_stat_idx;
- uint8 pad;
+ uint8 debug_01;
} phy_periodic_scca_stats_v255_t;
#define PHY_PERIODIC_LOG_VER1 (1u)
@@ -4426,6 +4423,31 @@ typedef struct phy_periodic_log_v27 {
phy_periodic_scca_stats_v4_t scca_counters_peri_log;
} phy_periodic_log_v27_t;
+#define PHY_PERIODIC_LOG_VER28 28u
+typedef struct phy_periodic_log_v28 {
+ uint8 version; /* Logging structure version */
+ uint8 numcores; /* Number of cores for which core specific data present */
+ uint16 length; /* Length of the structure */
+
+ /* Logs general PHY parameters */
+ phy_periodic_log_cmn_v13_t phy_perilog_cmn;
+
+ /* Logs ucode counters and NAVs */
+ phy_periodic_counters_v11_t counters_peri_log;
+
+ /* log data for BTcoex */
+ phy_periodic_btc_stats_v4_t phy_perilog_btc_stats;
+
+ /* log data for obss/dynbw */
+ phy_periodic_obss_stats_v4_t phy_perilog_obss_stats;
+
+ /* Logs data pertaining to each core */
+ phy_periodic_log_core_v9_t phy_perilog_core[2];
+
+ /* log data for smartCCA */
+ phy_periodic_scca_stats_v4_t scca_counters_peri_log;
+} phy_periodic_log_v28_t;
+
/* ************************************************** */
/* The version 255 for the logging data structures */
/* is for use in trunk ONLY. In release branches the */
diff --git a/include/wlioctl.h b/include/wlioctl.h
index 57db288..1b9dbf6 100644
--- a/include/wlioctl.h
+++ b/include/wlioctl.h
@@ -1034,6 +1034,64 @@ typedef struct wl_scan_params_v3 {
#define WL_MAX_ROAMSCAN_V3_DATSZ \
(WL_SCAN_PARAMS_V3_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
+/* changes in wl_scan_params_v4 as compared to wl_scan_params (v3)
+ * adding scan_type_ext field.
+ */
+typedef struct wl_scan_params_v4 {
+ uint16 version; /* Version of wl_scan_params, change value of
+ * WL_SCAN_PARAM_VERSION on version update
+ */
+	uint16 length;		/* length of structure wl_scan_params_v4_t
+ * without implicit pad
+ */
+ wlc_ssid_t ssid; /**< default: {0, ""} */
+ struct ether_addr bssid; /**< default: bcast */
+ int8 bss_type; /**< default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ uint8 ssid_type; /**< ssid_type_flag ,0 use default, and flags specified
+ * WL_SCAN_SSID_FLAGS
+ */
+ uint32 scan_type; /**< flags, 0 use default, and flags specified in
+ * WL_SCANFLAGS_XXX
+ */
+ uint32 scan_type_ext; /**< flags, 0 use default, and flags specified in
+ * WL_SCANFLAGS_EXT_XXX
+ */
+ int32 nprobes; /**< -1 use default, number of probes per channel */
+ int32 active_time; /**< -1 use default, dwell time per channel for
+ * active scanning
+ */
+ int32 passive_time; /**< -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ int32 home_time; /**< -1 use default, dwell time for the home channel
+ * between channel scans
+ */
+ int32 channel_num; /**< count of channels and ssids that follow
+ *
+ * low half is count of channels in channel_list, 0
+ * means default (use all available channels)
+ *
+ * high half is entries in wlc_ssid_t array that
+ * follows channel_list, aligned for int32 (4 bytes)
+ * meaning an odd channel count implies a 2-byte pad
+ * between end of channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the fixed
+ * parameter portion is assumed, otherwise ssid in
+ * the fixed portion is ignored
+ */
+ uint16 channel_list[]; /**< list of chanspecs */
+} wl_scan_params_v4_t;
+
+#define WL_SCAN_PARAMS_VERSION_V4 4
+
+/** size of wl_scan_params not including variable length array */
+#define WL_SCAN_PARAMS_V4_FIXED_SIZE (OFFSETOF(wl_scan_params_v4_t, channel_list))
+#define WL_MAX_ROAMSCAN_V4_DATSZ \
+ (WL_SCAN_PARAMS_V4_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
+
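[Illustrative note, not part of the patch] A minimal host-side sketch of how the low/high halves of channel_num described above could be packed for a v4 scan request; the helper name and the 16-bit split are assumptions rather than definitions from this header.

static void wl_scan_params_v4_pack_counts(wl_scan_params_v4_t *p, uint32 nchan, uint32 nssid)
{
	/* low half: channel count, high half: SSID count, per the field comment */
	p->channel_num = htod32((nssid << 16u) | (nchan & 0xFFFFu));
}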
#define ISCAN_REQ_VERSION_V1 1
#define ISCAN_REQ_VERSION_V2 2
@@ -1126,6 +1184,7 @@ typedef struct iscan_buf {
#define ESCAN_REQ_VERSION_V1 1
#define ESCAN_REQ_VERSION_V2 2
#define ESCAN_REQ_VERSION_V3 3
+#define ESCAN_REQ_VERSION_V4 4
typedef struct wl_escan_params_v1 {
uint32 version;
@@ -1148,9 +1207,17 @@ typedef struct wl_escan_params_v3 {
wl_scan_params_v3_t params;
} wl_escan_params_v3_t;
+typedef struct wl_escan_params_v4 {
+ uint32 version;
+ uint16 action;
+ uint16 sync_id;
+ wl_scan_params_v4_t params;
+} wl_escan_params_v4_t;
+
#define WL_ESCAN_PARAMS_V1_FIXED_SIZE (OFFSETOF(wl_escan_params_v1_t, params) + sizeof(wlc_ssid_t))
#define WL_ESCAN_PARAMS_V2_FIXED_SIZE (OFFSETOF(wl_escan_params_v2_t, params) + sizeof(wlc_ssid_t))
#define WL_ESCAN_PARAMS_V3_FIXED_SIZE (OFFSETOF(wl_escan_params_v3_t, params) + sizeof(wlc_ssid_t))
+#define WL_ESCAN_PARAMS_V4_FIXED_SIZE (OFFSETOF(wl_escan_params_v4_t, params) + sizeof(wlc_ssid_t))
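[Illustrative note] A sketch of sizing an escan v4 request buffer from the fixed-size macro above, assuming one uint16 chanspec per channel and no extra SSIDs; the function name is hypothetical.

static uint wl_escan_params_v4_reqsize(uint nchan)
{
	/* fixed portion (includes one wlc_ssid_t) plus the chanspec list */
	return WL_ESCAN_PARAMS_V4_FIXED_SIZE + (nchan * sizeof(uint16));
}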
/** event scan reduces amount of SOC memory needed to store scan results */
typedef struct wl_escan_result_v109 {
@@ -3940,6 +4007,7 @@ typedef struct wl_mws_ocl_override {
#define OCL_DISABLED_SCPEND (1u << 15u) /* Disabled due to scan pending */
#define OCL_DISABLED_EMLSR (1u << 16u) /* Disabled due to EMLSR enabled */
#define OCL_DISABLED_BTBPHYWAR (1u << 17u) /* Disabled during BT eSCO traffic */
+#define OCL_DISABLED_SCCA_MIT (1u << 18u) /* Disabled when SCCA enabled, sccatxlbt=1 */
/* Bits for hw_status */
@@ -4391,6 +4459,7 @@ typedef struct dvfs_hist_v1 {
#define WL_DVFS_REASON_WD 0x1000u /* WD */
#define WL_DVFS_REASON_SOFTAP 0x2000u /* SoftAP */
#define WL_DVFS_REASON_PHYBW 0x4000u /* Channel BW Change */
+#define WL_DVFS_REASON_MCHAN_ACTIVE 0x8000u /* Mchan Active */
/*
* Join preference iovar value is an array of tuples. Each tuple has a one-byte type,
@@ -5428,6 +5497,19 @@ typedef struct wl_mesh_pkt_cnt_v1 {
uint32 tx_act_group_mesh_da; // RA is bcast/mcast
} wl_mesh_pkt_cnt_v1_t;
+#define WL_SC_SLIM_SCAN_CNT_VER_V1 1u
+
+typedef struct wl_sc_slim_scan_cnts_v1 {
+ uint16 version;
+ uint16 len;
+ uint32 rx_start; /* Rx start/frame complete cnt */
+ uint32 good_fcs; /* Good FCS frame cnt */
+ uint32 bad_plcp; /* Frame drop cnt due to bad plcp */
+ uint32 pfifo_nempty; /* Frame drop cnt due to PFIFO not empty */
+ uint32 framelen_drop; /* Frame drop cnt due to excess frame length */
+ uint32 fc_addr_chckfail; /* Frame drop cnt due to FC/addr check fail */
+} wl_sc_slim_scan_cnts_v1_t;
+
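[Illustrative note] A hedged sketch of validating the slim-scan counters blob before use; the function name is hypothetical and the bad_plcp sanity check is only an example policy.

static int wl_parse_slim_scan_cnts(const void *buf, uint buflen)
{
	const wl_sc_slim_scan_cnts_v1_t *cnts = (const wl_sc_slim_scan_cnts_v1_t *)buf;

	if ((buflen < sizeof(*cnts)) || (dtoh16(cnts->version) != WL_SC_SLIM_SCAN_CNT_VER_V1)) {
		return BCME_VERSION;
	}
	/* example policy: flag the case where bad-PLCP drops exceed good frames */
	return (dtoh32(cnts->bad_plcp) > dtoh32(cnts->good_fcs)) ? BCME_ERROR : BCME_OK;
}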
/* WL_IFSTATS_XTLV_WL_SLICE_TXBF */
/* beamforming counters version 1 */
#define TXBF_ECOUNTERS_V1 (1u)
@@ -7817,14 +7899,10 @@ enum {
#define DEFAULT_REPEAT 10
#define DEFAULT_EXP 2
-#define PFN_PARTIAL_SCAN_BIT 0
-#define PFN_PARTIAL_SCAN_MASK 1
-
#define PFN_SWC_RSSI_WINDOW_MAX 8
#define PFN_SWC_MAX_NUM_APS 16
#define PFN_HOTLIST_MAX_NUM_APS 64
-
#define MAX_EPNO_HIDDEN_SSID 8
#define MAX_WHITELIST_SSID 2
@@ -8008,6 +8086,7 @@ typedef struct wl_pfn_swc_results {
uint32 total_count; /**< Total expected results */
wl_pfn_significant_net_t list[];
} wl_pfn_swc_results_t;
+
typedef struct wl_pfn_net_info_bssid_v1 {
struct ether_addr BSSID;
uint8 channel; /**< channel number only */
@@ -14912,6 +14991,7 @@ enum wl_nan_cmd_xtlv_id {
#define WL_NAN_CMD_DBG_COMP_ID 0x0f
#define WL_NAN_CMD_PAIRING_COMP_ID 0x10
#define WL_NAN_CMD_PAIRING_PEER_COMP_ID 0x11
+#define WL_NAN_CMD_TETHER_COMP_ID 0x12
#define WL_NAN_CMD_COMP_SHIFT 8
@@ -15044,7 +15124,9 @@ typedef enum wl_nan_tlv {
WL_NAN_XTLV_PAIRING_PEER_NIK = NAN_CMD(WL_NAN_CMD_PAIRING_COMP_ID, 0x05),
/* XTLV list to include NPK, local NIK and peer NIK */
- WL_NAN_XTLV_PAIRING_XTLV_LIST = NAN_CMD(WL_NAN_CMD_PAIRING_COMP_ID, 0x06)
+ WL_NAN_XTLV_PAIRING_XTLV_LIST = NAN_CMD(WL_NAN_CMD_PAIRING_COMP_ID, 0x06),
+
+ WL_NAN_XTLV_TETHER = NAN_CMD(WL_NAN_CMD_TETHER_COMP_ID, 0x01) /* Tether xtlv */
} wl_nan_tlv_t;
/* Sub Module ID's for NAN */
@@ -18116,10 +18198,10 @@ typedef enum {
WL_WSEC_INFO_SAE_GROUPS = (WL_WSEC_INFO_BSS_BASE + 0xD),
WL_WSEC_INFO_OCV = (WL_WSEC_INFO_BSS_BASE + 0xE),
WL_WSEC_INFO_BSS_KEY_IDLE_TIME = (WL_WSEC_INFO_BSS_BASE + 0xF),
- WL_WSEC_INFO_BSS_INCLUDE_RSNXE = (WL_WSEC_INFO_BSS_BASE + 0x10), /**<
- Include RSNXE in pure
- WPA-PSK mode
- */
+ /* Include RSNXE in pure WPA-PSK mode */
+ WL_WSEC_INFO_BSS_INCLUDE_RSNXE = (WL_WSEC_INFO_BSS_BASE + 0x10),
+ /* set/get OWE DH group Id */
+ WL_WSEC_INFO_OWE_DH_GROUP = (WL_WSEC_INFO_BSS_BASE + 0x11),
/*
* ADD NEW ENUM ABOVE HERE
@@ -19819,7 +19901,8 @@ typedef wl_ftm_iov_t wl_proxd_iov_t;
/** status - TBD BCME_ vs proxd status - range reserved for BCME_ */
enum {
- WL_PROXD_E_LAST = -1058,
+ WL_PROXD_E_LAST = -1059,
+ WL_PROXD_E_CHANSW = -1059,
WL_PROXD_E_PKTFREED = -1058,
WL_PROXD_E_ASSOC_INPROG = -1057,
WL_PROXD_E_NOAVAIL = -1056,
@@ -21031,10 +21114,11 @@ typedef enum wl_interface_type {
* Bit 5 of flags field indicates mesh usage on this interface
*/
#define WL_INTERFACE_MESH_USE (1u << 5u)
+#define WL_INTERFACE_NAN_TETHER_USE (1u << 6u)
#define WL_INTERFACE_IOV_ALLOWED_FLAGS (WL_INTERFACE_CREATE_AP | WL_INTERFACE_MAC_USE | \
WL_INTERFACE_WLC_INDEX_USE | WL_INTERFACE_IF_INDEX_USE | \
- WL_INTERFACE_BSSID_INDEX_USE | WL_INTERFACE_MESH_USE)
+ WL_INTERFACE_BSSID_INDEX_USE | WL_INTERFACE_MESH_USE | WL_INTERFACE_NAN_TETHER_USE)
#define WL_INTERFACE_IOV_FLAGS_VALID(_f) (((_f) & ~WL_INTERFACE_IOV_ALLOWED_FLAGS) == 0u)
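[Illustrative note] The allowed-flags mask above now carries the NAN tether bit; a caller-side check could look like the sketch below (function name hypothetical).

static int wl_interface_flags_check(uint32 if_flags)
{
	/* reject any flag outside the allowed set */
	return WL_INTERFACE_IOV_FLAGS_VALID(if_flags) ? BCME_OK : BCME_BADARG;
}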
@@ -24153,6 +24237,8 @@ typedef enum wl_hc_dd_type {
WL_HC_DD_NAN =11, /* NAN health check */
WL_HC_DD_CHSW =12, /* Channel Switch health check */
WL_HC_DD_LHL =13, /* LHL timer health check */
+ WL_HC_DD_RX_STALL_V3 =14, /* RX stall check v3 */
+ WL_HC_DD_TX_STALL_V2 =15, /* TX stall check V2 */
WL_HC_DD_MAX
} wl_hc_dd_type_t;
@@ -24196,6 +24282,23 @@ typedef struct {
uint32 tx_failure_all;
} wl_tx_hc_info_t;
+/* Health Check report v2 structure for Tx packet failure check */
+typedef struct {
+ uint16 type; /* WL_HC_DD_TX_STALL_V2 */
+ uint16 length;
+ uint8 if_idx; /* interface index on which issue is reported */
+ uint8 ac; /* access category on which this problem is seen */
+ uint8 link_idx;
+ uint8 PAD;
+ uint32 stall_bitmap_low;
+ uint32 stall_bitmap_high;
+ uint32 tx_all;
+ uint32 tx_failure_all;
+ uint32 threshold;
+ struct ether_addr peer_ea;
+ uint8 PAD[2]; /* Reserved */
+} wl_tx_hc_info_v2_t;
+
/* Health Check report structure for Rx dropped packet failure check */
typedef struct {
uint16 type; /* WL_HC_RX_DD_STALL */
@@ -24221,6 +24324,22 @@ typedef struct {
uint8 PAD[2]; /* Reserved */
} wl_rx_hc_info_v2_t;
+/* Health Check report v3 structure for Rx dropped packet failure check */
+typedef struct {
+	uint16 type; /* WL_HC_DD_RX_STALL_V3 */
+ uint16 length;
+ uint8 if_idx; /* interface index on which issue is reported */
+ uint8 ac; /* access category on which this problem is seen */
+ uint8 link_idx;
+ uint8 PAD;
+ uint32 rx_hc_pkts;
+ uint32 rx_hc_dropped_all;
+ uint32 rx_hc_alert_th;
+ uint32 reason; /* refer to bcm_rx_hc_stall_reason_t above */
+ struct ether_addr peer_ea;
+ uint8 PAD[2]; /* Reserved */
+} wl_rx_hc_info_v3_t;
+
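[Illustrative note] Both new report layouts begin with a 16-bit type, so a host parser can branch on it; the sketch below is an assumption about usage (length checks omitted, function name hypothetical).

static uint32 wl_hc_dd_failed_count(const void *buf)
{
	uint16 type = dtoh16(*(const uint16 *)buf);

	if (type == WL_HC_DD_TX_STALL_V2) {
		return dtoh32(((const wl_tx_hc_info_v2_t *)buf)->tx_failure_all);
	}
	if (type == WL_HC_DD_RX_STALL_V3) {
		return dtoh32(((const wl_rx_hc_info_v3_t *)buf)->rx_hc_dropped_all);
	}
	return 0u; /* other report types not handled in this sketch */
}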
/* HE top level command IDs */
enum {
WL_HE_CMD_ENAB = 0u,
@@ -24918,6 +25037,7 @@ typedef struct wl_mlo_config_v1 {
#define WL_MLO_UPDATE_MODE_PREF (1u << 0u) /* Mode is updated in the config req */
#define WL_MLO_UPDATE_BAND_PREF (1u << 1u) /* Band pref is updated in the config req */
#define WL_MLO_RESET_CONFIG_PREF (1u << 2u) /* Reset the pref_config to invalid */
+#define WL_MLO_STRICT_ORDER_CONFIG_PREF (1u << 3u) /* Strict preference order in config_pref */
#define WL_MLO_CONFIG_PREF_VER_1 1u
@@ -24932,7 +25052,7 @@ typedef struct wl_mlo_config_pref_v1 {
uint8 num_band_pref; /* Number of valid bands in pref_band array */
uint8 mode_pref[MLO_MODE_PREF_MAX_V1]; /* MLO mode priority order */
uint8 band_pref[WL_BAND_MAX_CNT]; /* Preferred band priority order */
- uint8 pad; /* Explicit padding */
+ uint8 strict_order; /* Strict preference order */
} wl_mlo_config_pref_v1_t;
/* mlo info structure per link */
@@ -25307,6 +25427,11 @@ typedef struct wl_mlo_tid_map_adv_v1 {
#define WL_MLO_EMLSR_CTRL_IS_SET(val, flag) (val & flag) ? 1u : 0u
#define WL_MLO_EMLSR_CTRL_VER_1 (1u)
+
+/* Bits for fw_status and disable/enable request */
+#define EMLSR_LINK_PRIO_REQ_HOST (1 << 0u) /* Disabled via HOST IOVAR */
+#define EMLSR_LINK_PRIO_REQ_LOW_LAT (1 << 1u) /* Disabled due to Low latency mode */
+
typedef struct wl_mlo_emlsr_ctrl_v1 {
uint16 version;
uint16 length;
@@ -25865,14 +25990,35 @@ typedef struct {
uint8 rowdata[]; /* read rows data */
} wl_otpecc_rows_t;
+/* "otp ecc" command */
+typedef struct {
+ uint16 version; /* version of this structure */
+ uint16 len; /* len in bytes of this structure */
+ uint16 cmdtype; /* command type : 0 : read row data,
+ * 1 : ECC lock,
+ * 2 : program a row data with Parity
+ * 3 : program a row data
+ * with Parity on locking row
+ * 4 : clear double bit error status
+ */
+ uint16 rowoffset; /* start row offset */
+ uint16 numrows; /* number of rows */
+ uint16 pad; /* for alignment */
+ uint32 progdata; /* a row data to program */
+} wl_otpecc_cmd_t;
+
#define WL_OTPECC_ROWS_VER 1
#define WL_OTPECC_ROWS_CMD_READ 0
#define WL_OTPECC_ROWS_CMD_LOCK 1
+#define WL_OTPECC_ROWS_CMD_PROG 2
+#define WL_OTPECC_ROWS_CMD_PROG_LOCK 3
+#define WL_OTPECC_ROWS_CMD_CLRDBLERR 4
#define WL_OTPECC_ARGIDX_CMDTYPE 0 /* command type */
#define WL_OTPECC_ARGIDX_ROWOFFSET 1 /* start row offset */
#define WL_OTPECC_ARGIDX_NUMROWS 2 /* number of rows */
+#define WL_OTPECC_ARGIDX_PROGDATA 3 /* a row data to program */
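[Illustrative note] Hypothetical fill of the new "otp ecc" command for a single-row program request; reusing WL_OTPECC_ROWS_VER for the version field is an assumption.

static void wl_otpecc_cmd_prog_row(wl_otpecc_cmd_t *cmd, uint16 row, uint32 data)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->version = WL_OTPECC_ROWS_VER;	/* assumed version value */
	cmd->len = sizeof(*cmd);
	cmd->cmdtype = WL_OTPECC_ROWS_CMD_PROG;
	cmd->rowoffset = row;
	cmd->numrows = 1;
	cmd->progdata = data;
}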
/* "otpeccrows" raw data size per row */
#define WL_ECCDUMP_ROW_SIZE_BYTE 6 /* 4 bytes row data + 2 bytes ECC status */
@@ -25922,6 +26068,7 @@ enum wl_otp_cmd_ids {
WL_OTP_CMD_RGNSTATUS = 1u,
WL_OTP_CMD_RGNDUMP = 2u,
WL_OTP_CMD_RGNWRITE = 3u,
+ WL_OTP_CMD_ECC = 4u,
/* Add before this !!! */
WL_OTP_CMD_LAST
};
@@ -25932,6 +26079,8 @@ enum wl_otp_xtlv_id {
WL_OTP_XTLV_ADDR = 2u, /* OTP region start address */
WL_OTP_XTLV_SIZE = 3u, /* OTP region size */
WL_OTP_XTLV_DATA = 4u, /* OTP dump data */
+ WL_OTP_XTLV_ECC_ROW = 5u, /* OTP ecc row */
+ WL_OTP_XTLV_ECC_ROW_DATA = 6u, /* OTP ecc row data */
};
@@ -26126,6 +26275,7 @@ typedef struct wlc_btcx_profile_v3 {
#define SSSR_REG_INFO_VER_2 2u
#define SSSR_REG_INFO_VER_3 3u
#define SSSR_REG_INFO_VER_4 4u
+#define SSSR_REG_INFO_VER_5 5u
typedef struct sssr_reg_info_v0 {
@@ -26498,6 +26648,111 @@ typedef struct sssr_reg_info_v4 {
uint32 fis_enab;
} sssr_reg_info_v4_t;
+typedef struct sssr_reg_info_v5 {
+ uint16 version;
+ uint16 length; /* length of the structure validated at host */
+ struct {
+ struct {
+ uint32 pmuintmask0;
+ uint32 pmuintmask1;
+ uint32 resreqtimer;
+ uint32 macresreqtimer;
+ uint32 macresreqtimer1;
+ uint32 macresreqtimer2;
+			uint32 pmu_min_res_mask;	/* Address of min res mask */
+			uint32 pmu_max_res_mask;	/* Address of max res mask */
+ uint32 sssr_max_res_mask; /* Max_res_mask value to be used for SSSR */
+ } base_regs;
+ } pmu_regs;
+ struct {
+ struct {
+ uint32 intmask;
+ uint32 powerctrl;
+ uint32 clockcontrolstatus;
+ uint32 powerctrl_mask;
+ } base_regs;
+ } chipcommon_regs;
+ struct {
+ struct {
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 extrsrcreq;
+ } oobr_regs;
+ uint32 war_reg;
+ } arm_regs;
+ struct {
+ struct {
+ uint32 ltrstate;
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 extrsrcreq;
+ } oobr_regs;
+ } pcie_regs;
+ struct {
+ struct {
+ uint32 xmtaddress;
+ uint32 xmtdata;
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 extrsrcreq;
+ } oobr_regs;
+ uint32 sr_size;
+ uint32 war_reg;
+ } mac_regs[MAX_NUM_D11_CORES_WITH_SCAN];
+
+ struct {
+ struct {
+ uint32 clockcontrolstatus;
+ uint32 clockcontrolstatus_val;
+ } base_regs;
+ struct {
+ uint32 extrsrcreq;
+ } oobr_regs;
+
+ uint32 war_reg;
+ uint32 saqm_sssr_addr;
+ uint32 saqm_sssr_size;
+
+ struct {
+ uint32 digsr_srcontrol1_addr; /* DIGSR engine sr control1 register */
+ uint32 digsr_srcontrol1_clrbit_val; /* clear these bits from srcontrol1 */
+
+ uint32 digsr_srcontrol2_addr; /* DIGSR engine sr control2 register */
+ uint32 digsr_srcontrol2_setbit_val; /* Value to set in the above address */
+
+ uint32 pmuchip_ctl_addr_reg;
+ uint32 pmuchip_ctl_val;
+ uint32 pmuchip_ctl_data_reg;
+ uint32 pmuchip_ctl_setbit_val;
+ } sssr_config_regs;
+ } saqm_sssr_info;
+
+ struct {
+ uint32 dig_sssr_addr;
+ uint32 dig_sssr_size;
+ } dig_mem_info;
+
+ struct {
+ uint32 fis_addr;
+ uint32 fis_size;
+ } fis_mem_info;
+
+ /* Start address and end address for SSSR collection by host. */
+ struct {
+ uint32 sysmem_sssr_addr;
+ uint32 sysmem_sssr_size;
+ } sssr_all_mem_info;
+
+ uint32 fis_enab;
+ uint16 sr_asm_version; /* SR ASM version to help SSSR extraction scripts */
+ uint16 PAD;
+} sssr_reg_info_v5_t;
/* A wrapper structure for all versions of SSSR register information structures */
typedef union sssr_reg_info {
@@ -26506,6 +26761,7 @@ typedef union sssr_reg_info {
sssr_reg_info_v2_t rev2;
sssr_reg_info_v3_t rev3;
sssr_reg_info_v4_t rev4;
+ sssr_reg_info_v5_t rev5;
} sssr_reg_info_cmn_t;
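[Illustrative note] All revisions keep the version field first, so the host can peek at it before choosing a union member; only the new rev5 branch is shown and the function name is hypothetical.

static uint16 sssr_reg_info_length(const sssr_reg_info_cmn_t *info)
{
	if (info->rev5.version == SSSR_REG_INFO_VER_5) {
		return info->rev5.length;
	}
	return 0u; /* earlier revisions elided in this sketch */
}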
/* ADaptive Power Save(ADPS) structure definition */
@@ -27938,9 +28194,22 @@ typedef struct wl_avs_info_v1 {
uint32 aging; /* aging setting in nvram */
} wl_avs_info_v1_t;
-/* Placeholder, add more fields later */
+/* The value is read from AVSCapabilities.AVSHistogramDepth in 4390a0/4399b0 */
+#define AVS_HIST_ENTRY_NUM 8
+
+typedef struct wl_avs_hist_entry_s {
+ uint32 time; /* in ms */
+ uint32 voltage_ndv; /* in mV */
+} wl_avs_hist_entry_t;
+
typedef struct wl_avs_info_v2 {
uint16 version; /* Structure version */
+ uint16 length; /* length of structure */
+ int16 ro_margin_idx; /* -1 when avsdump_internal=1 is not in nvram */
+ uint16 voltage_ndv; /* current NDV voltage in mV */
+ uint16 voltage_ldv; /* current LDV voltage in mV */
+ uint16 hist_entry_num; /* 0 when avsdump_internal=1 is not in nvram */
+ wl_avs_hist_entry_t hist[AVS_HIST_ENTRY_NUM];
} wl_avs_info_v2_t;
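[Illustrative note] A sketch of walking the new AVS history array; treating the last valid entry as the most recent sample and clamping hist_entry_num are assumptions.

static uint32 wl_avs_hist_latest_ndv(const wl_avs_info_v2_t *avs)
{
	uint16 n = dtoh16(avs->hist_entry_num);

	if (n == 0u) {
		return dtoh16(avs->voltage_ndv); /* no history, fall back to current NDV */
	}
	if (n > AVS_HIST_ENTRY_NUM) {
		n = AVS_HIST_ENTRY_NUM; /* defensive clamp */
	}
	return dtoh32(avs->hist[n - 1u].voltage_ndv);
}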
#define WL_AVS_INFO_VER_1 1
@@ -27953,6 +28222,8 @@ enum wl_sc_cmd {
WL_SC_CMD_CAP = 2,
WL_SC_CMD_CONFIG = 3,
WL_SC_CMD_PMALERT_ADJ_FACTOR = 4,
+ WL_SC_CMD_FEMASK = 5,
+ WL_SC_CMD_MSCHANS = 6,
WL_SC_CMD_LAST
};
@@ -28185,6 +28456,7 @@ typedef struct key_update_info_v1
#define WL_OMI_CONFIG_VERSION_1 1u
+#define WL_OMI_CONFIG_VERSION_2 2u
/* values for valid_bm */
#define OMI_CONFIG_VALID_BMP_RXNSS 0x0001u
@@ -28205,6 +28477,14 @@ typedef struct key_update_info_v1
#define OMI_CONFIG_RXNSS_EXT_MAX 1u
#define OMI_CONFIG_TXNSTS_EXT_MAX 1u
+/** band_bitmap indicating bands for which OMI config is get/set */
+#define WL_OMI_BAND_BITMAP_2G 0x1u /* 2g field updated */
+#define WL_OMI_BAND_BITMAP_5G 0x2u /* 5g field updated */
+#define WL_OMI_BAND_BITMAP_6G 0x4u /* 6g field updated */
+#define WL_OMI_BAND_BITMAP_ALL (WL_OMI_BAND_BITMAP_2G | \
+ WL_OMI_BAND_BITMAP_5G | \
+ WL_OMI_BAND_BITMAP_6G)
+
typedef struct wl_omi_config {
uint16 valid_bm; /* validity bitmask for each config */
uint8 rxnss;
@@ -28225,6 +28505,14 @@ typedef struct wl_omi_req {
wl_omi_config_t config;
} wl_omi_req_v1_t;
+typedef struct wl_omi_req_v2 {
+ uint16 version;
+ uint16 len;
+ uint8 band_bitmap; /* bands for which config is updated */
+ uint8 PAD[3];
+ wl_omi_config_t config;
+} wl_omi_req_v2_t;
+
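[Illustrative note] A sketch of a v2 OMI request that updates rxnss for the 5G band only, using the new band_bitmap field; the helper name and field usage are assumptions.

static void wl_omi_req_v2_fill_5g_rxnss(wl_omi_req_v2_t *req, uint8 rxnss)
{
	memset(req, 0, sizeof(*req));
	req->version = WL_OMI_CONFIG_VERSION_2;
	req->len = sizeof(*req);
	req->band_bitmap = WL_OMI_BAND_BITMAP_5G;	/* only the 5g config is valid */
	req->config.valid_bm = OMI_CONFIG_VALID_BMP_RXNSS;
	req->config.rxnss = rxnss;
}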
/* Bits for ULMU disable reason */
#define OMI_ULMU_DISABLED_HOST 0x01u /* Host has disabled through he omi */
@@ -29496,6 +29784,7 @@ typedef enum wlc_sta_pm_sc_ofld_exit_reason {
STA_PM_SC_OFLD_EXIT_AP_BSS = 21u, /* Exit due to AP BSS active */
STA_PM_SC_OFLD_EXIT_MLO = 22u, /* Exit due to high priority MLO link */
STA_PM_SC_OFLD_EXIT_TDLS = 23u, /* Exit due to TDLS active */
+ STA_PM_SC_OFLD_EXIT_EMLSR_ML_MODE_CHANGE = 24u, /* Exit due to EMLSR ML mode change */
STA_PM_SC_OFLD_EXIT_MAX = 255u /* Max, uint8 for now */
} wlc_sta_pm_sc_ofld_exit_reason_t;
@@ -31765,6 +32054,18 @@ typedef struct wlc_sup_oper_class_cfg_v1 {
WLC_SUP_OPR_CLS_IE_SUP_OPR_CLS_PRES_VAL)
+#ifdef PHY_FCBS_CHSW
+/* for FW FCBS_HW control version */
+#define WL_FCBS_CHSW_IOV_VERSION_1 1u
+
+/* FW FCBS_CHSW control iovar subcmd ids */
+enum {
+ IOV_FCBS_CHSW_ENAB = 1,
+ IOV_FCBS_CHSW_STATUS = 2,
+ IOV_FCBS_CHSW_LAST
+};
+#endif /* PHY_FCBS_CHSW */
+
#define BSS_STA_INFO_PARAM_VER_1 1u
/** Input structure for IOV_BSS_PEER_INFO */
@@ -31807,4 +32108,114 @@ typedef struct bss_sta_list_info {
} bss_sta_list_info_t;
#define BSS_STA_LIST_INFO_FIXED_LEN OFFSETOF(bss_sta_list_info_t, peer_sta_info)
+
+#define CAPEXT_INFO_VERSION_1 (1u)
+
+/* TODO: This would be moved to src once component dependency is addressed */
+#ifndef BCM_CAPEXT
+#define CAPEXT_INFO_VERSION CAPEXT_INFO_VERSION_1
+#endif /* BCM_CAPEXT */
+
+/* Top structure of capext reporting. For reporting, feature ids are used as types in XTLVs */
+typedef struct {
+ uint16 version; /**< see definition of CAPEXT_INFO_VERSION */
+ uint16 datalen; /**< length of data including all paddings. */
+ uint8 data []; /**< variable length payload:
+ * 1 or more bcm_xtlv_t type of tuples.
+ * each tuple is padded to multiple of 4 bytes.
+ * 'datalen' field of this structure includes all paddings.
+ */
+} capext_info_t;
+
+/* Each feature reported in capext has a feature id. Feature id is a 16-bit value.
+ * The feature id namespace is split into 3 partitions. One for BUS, the second for RTE,
+ * and the third for WL. All partitions are contiguous and fixed in size
+ */
+#define CAPEXT_FEATURE_ID_NUM_PARTITIONS (3u)
+#define CAPEXT_FEATURE_ID_PARTITION_SIZE (1024u)
+/* Feature IDs from 3072 for capext are reserved */
+#define CAPEXT_RSVD_FEATURE_ID_BASE (3072u)
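[Illustrative note] A sketch of classifying a feature id into its partition from the bases and fixed partition size above; mapping index 2 to the WL partition is inferred and therefore an assumption.

static int capext_feature_partition(uint16 id)
{
	if (id >= CAPEXT_RSVD_FEATURE_ID_BASE) {
		return -1; /* reserved range */
	}
	return (int)(id / CAPEXT_FEATURE_ID_PARTITION_SIZE); /* 0: BUS, 1: RTE, 2: WL */
}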
+
+/* Bus partition */
+/* The features listed in the enumeration below have subfeatures.
+ * If a new feature is added/updated and that feature has sub-features that need to be reported,
+ * add that feature here
+ */
+#define CAPEXT_BUS_FEATURE_ID_BASE (0)
+enum capext_bus_feature_ids {
+ CAPEXT_BUS_FEATURE_RSVD = (CAPEXT_BUS_FEATURE_ID_BASE + 0),
+ /* BUS top level feature id to hold and report bitmaps of features with and
+ * without sub-features.
+ */
+ CAPEXT_BUS_FEATURE_BUS_FEATURES = (CAPEXT_BUS_FEATURE_ID_BASE + 1),
+ /* BUS feature ids below hold and report sub-feature bitmaps of some features
+ * mentioned in top level feature id bitmap
+ */
+ CAPEXT_BUS_FEATURE_PKTLAT = (CAPEXT_BUS_FEATURE_ID_BASE + 2),
+ CAPEXT_BUS_FEATURE_MAX
+};
+
+/* BUS features bit positions in top level bus feature id. Features mentioned below are reported */
+enum capext_bus_feature_bitpos {
+ CAPEXT_BUS_FEATURE_BITPOS_HP2P = 0,
+ CAPEXT_BUS_FEATURE_BITPOS_PTM = 1,
+ CAPEXT_BUS_FEATURE_BITPOS_PKTLAT = 2,
+ CAPEXT_BUS_FEATURE_BITPOS_BUSTPUT = 3, /* feature with sub-features */
+ CAPEXT_BUS_FEATURE_BITPOS_MAX
+};
+
+/* Packet latency sub-feature bit positions. These sub-features need to be reported */
+enum capext_pktlat_subfeature_bitpos {
+ CAPEXT_PKTLAT_BITPOS_META = 0,
+ CAPEXT_PKTLAT_BITPOS_IPC = 1,
+ CAPEXT_PKTLAT_BITPOS_MAX
+};
+
+/* RTE partition */
+/* The features listed in the enumeration below have subfeatures.
+ * If a new feature is added and that feature has sub-features that need to be reported,
+ * add that feature here
+ */
+#define CAPEXT_RTE_FEATURE_ID_BASE (1024u)
+enum capext_rte_feature_ids {
+ CAPEXT_RTE_FEATURE_RSVD = (CAPEXT_RTE_FEATURE_ID_BASE + 0),
+ /* RTE top level feature id to hold and report bitmaps of features with and
+ * without sub-features.
+ */
+ CAPEXT_RTE_FEATURE_RTE_FEATURES = (CAPEXT_RTE_FEATURE_ID_BASE + 1),
+ /* RTE feature ids below hold and report sub-feature bitmaps of some features
+ * mentioned in top level feature id bitmap
+ */
+ CAPEXT_RTE_FEATURE_ECOUNTERS = (CAPEXT_RTE_FEATURE_ID_BASE + 2),
+ CAPEXT_RTE_FEATURE_MAX
+};
+
+/* Ecounters sub-feature bit positions. These sub-features need to be reported */
+enum capext_ecounters_subfeature_bitpos {
+ CAPEXT_ECOUNTERS_BITPOS_TXHIST = 0,
+ CAPEXT_ECOUNTERS_BITPOS_ADV = 1,
+ CAPEXT_ECOUNTERS_BITPOS_PHY = 2,
+ CAPEXT_ECOUNTERS_BITPOS_PHY_CAL = 3,
+ CAPEXT_ECOUNTERS_BITPOS_CHSTATS = 4,
+ CAPEXT_ECOUNTERS_BITPOS_PEERSTATS = 5,
+ CAPEXT_ECOUNTERS_BITPOS_DTIM_MISS = 6,
+ CAPEXT_ECOUNTERS_BITPOS_MAX
+};
+
+/* RTE features bit positions in top level rte feature id. Features mentioned below are reported */
+enum capext_rte_feature_bitpos {
+ CAPEXT_RTE_FEATURE_BITPOS_H2D_LOG_TIME_SYNC = 0,
+ CAPEXT_RTE_FEATURE_BITPOS_HWRNG = 1,
+ CAPEXT_RTE_FEATURE_BITPOS_SPMI = 2,
+ CAPEXT_RTE_FEATURE_BITPOS_ECOUNTERS = 3, /* feature with sub-features */
+ CAPEXT_RTE_FEATURE_BITPOS_EVENT_LOG = 4,
+
+ CAPEXT_RTE_FEATURE_BITPOS_LOGTRACE = 5,
+ CAPEXT_RTE_FEATURE_BITPOS_HCHK = 6,
+ CAPEXT_RTE_FEATURE_BITPOS_SMD = 7,
+ CAPEXT_RTE_FEATURE_BITPOS_ETD = 8,
+ CAPEXT_RTE_FEATURE_BITPOS_CST = 9,
+ CAPEXT_RTE_FEATURE_BITPOS_MAX
+};
+
#endif /* _wlioctl_h_ */
diff --git a/include/wlioctl_defs.h b/include/wlioctl_defs.h
index 2ddfc7c..ba5be15 100644
--- a/include/wlioctl_defs.h
+++ b/include/wlioctl_defs.h
@@ -297,6 +297,9 @@ typedef uint32 ratespec_t;
* enable LISTEN along with PASSIVE flag
*/
+/* WL_SCANFLAGS_EXT_ flags */
+#define WL_SCANFLAGS_EXT_LOWPOWER_PARALLEL_2G_SCAN 0x1U /* Lowpower parallel 2G scan */
+
/* BIT MASK for 6G_SCAN_TYPE */
#define WL_SCAN_SSIDFLAGS_SHORT_SSID 0x01U /* include short ssid */
#define WL_SCAN_INC_RNR 0x02U /* Include RNR channels for scan */
@@ -465,6 +468,7 @@ typedef uint32 ratespec_t;
#define WL_BSS2_FLAGS_RNR_MATCH 0x10 /* To report original BSS that has RNR match */
#define WL_BSS2_FLAGS_HE_BCN_PRBRSP	0x20	/* BSS update to indicate HE bcn or prb rsp. */
#define WL_BSS2_FLAGS_HE_6G_DUP 0x40 /* non-HT dup'ed beacon indicator */
+#define WL_BSS2_FLAGS_FROM_SS 0x80 /* bss_info from results on slim scan */
/* bit definitions for bcnflags in wl_bss_info */
#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT 0x01 /* beacon had IE, accessnet valid */
@@ -1475,6 +1479,8 @@ typedef uint32 ratespec_t;
#define WL_WBUS_VAL 0x00000008
#define WL_DTPC_DBG_VAL 0x00000010
#define WL_DYNBW_DBG_VAL 0x00000020
+#define WL_RATE_INFO_VAL 0x00000040
+#define WL_RATE_TRACE_VAL 0x00000080
/* number of bytes needed to define a proper bit mask for MAC event reporting */
#define BCMIO_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
@@ -2170,6 +2176,7 @@ typedef uint32 ratespec_t;
/* report found/lost events for SSID and BSSID networks separately */
#define REPORT_SEPERATELY_BIT 11
#define BESTN_BSSID_ONLY_BIT 12
+#define BESTN_BSSID_ASSOC_BIT 13
#define SORT_CRITERIA_MASK 0x0001
#define AUTO_NET_SWITCH_MASK 0x0002
@@ -2186,6 +2193,7 @@ typedef uint32 ratespec_t;
#define REPORT_SEPARATELY_MASK 0x0800
#define REPORT_SEPERATELY_MASK REPORT_SEPARATELY_MASK // Legacy typo
#define BESTN_BSSID_ONLY_MASK 0x1000
+#define BESTN_BSSID_ASSOC_MASK 0x2000
#ifdef PFN_SCANRESULT_2
#define PFN_SCANRESULT_VERSION 2
@@ -2204,8 +2212,11 @@ typedef uint32 ratespec_t;
#define DEFAULT_REPEAT 10
#define DEFAULT_EXP 2
-#define PFN_PARTIAL_SCAN_BIT 0
-#define PFN_PARTIAL_SCAN_MASK 1
+/* Results flags for pfn scan history */
+#define PFN_PARTIAL_SCAN_BIT 0u
+#define PFN_PARTIAL_SCAN_MASK 0x1u
+#define PFN_ASSOCIATED_AP_BIT 1u
+#define PFN_ASSOCIATED_AP_MASK 0x2u
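[Illustrative note] Example use of the renamed result flags; 'flags' stands in for the per-network flags field carried in pfn scan history records, which is an assumption about where these masks apply.

static bool pfn_result_is_assoc_ap(uint32 flags)
{
	return (flags & PFN_ASSOCIATED_AP_MASK) != 0u;
}

static bool pfn_result_is_partial(uint32 flags)
{
	return (flags & PFN_PARTIAL_SCAN_MASK) != 0u;
}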
#define WL_PFN_SUPPRESSFOUND_MASK 0x08
#define WL_PFN_SUPPRESSLOST_MASK 0x10
@@ -2697,7 +2708,7 @@ enum {
#define WLC_WITH_XTLV_CNT
/* Number of xtlv info as required to calculate subcounter offsets */
-#define WL_CNT_XTLV_ID_NUM 12
+#define WL_CNT_XTLV_ID_NUM 13
#define WL_TLV_IOV_VERSION_1 1u
#define WL_TLV_IOV_VERSION_2 2u
@@ -2756,6 +2767,7 @@ enum wl_cnt_xtlv_id {
WL_CNT_XTLV_GE88_UCODE_RX_U32_V2 = 0x100a, /* corerev >= 88 ucode macstats V2 - rx */
WL_CNT_XTLV_DYN_BW_STATS_V1 = 0x100b, /* Deprecated */
WL_CNT_XTLV_DYN_BW_STATS = 0x100c, /* corerev >= 88 DynBW stats */
+ WL_CNT_XTLV_SLIM_SCAN_STATS = 0x100d /* Slim Scan stats */
};
/* bitmap for clm_flags iovar */
@@ -3311,6 +3323,7 @@ enum wlc_capext_feature_bitpos {
WLC_CAPEXT_FEATURE_BITPOS_OCV_AP = 125,
WLC_CAPEXT_FEATURE_BITPOS_WIFI_BT5G = 126,
WLC_CAPEXT_FEATURE_BITPOS_SAE_EXT = 127,
+ WLC_CAPEXT_FEATURE_BITPOS_11AZ = 128,
WLC_CAPEXT_FEATURE_BITPOS_MAX
};
diff --git a/linux_pkt.c b/linux_pkt.c
index d995480..4b6b3d7 100644
--- a/linux_pkt.c
+++ b/linux_pkt.c
@@ -30,6 +30,7 @@
#include <osl.h>
#include <bcmutils.h>
+#include <bcmstdlib_s.h>
#include <pcicfg.h>
#if defined(BCMASSERT_LOG) && !defined(OEM_ANDROID)
@@ -112,8 +113,13 @@ int osl_static_mem_init(osl_t *osh, void *adapter)
return -ENOMEM;
}
- bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
- (STATIC_PKT_MAX_NUM));
+ if (memcpy_s(bcm_static_skb,
+ STATIC_BUF_TOTAL_LEN,
+ skb_buff_ptr,
+ sizeof(struct sk_buff *) * (STATIC_PKT_MAX_NUM))) {
+ BCM_PRINT(("static buf too small!\n"));
+ return -ENOMEM;
+ }
for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
bcm_static_skb->pkt_use[i] = 0;
}
@@ -735,9 +741,9 @@ osl_pkttrace(osl_t *osh, void *pkt, uint16 bit)
#endif /* BCMDBG_PTRACE */
char *
-osl_pktlist_dump(osl_t *osh, char *buf)
+osl_pktlist_dump(osl_t *osh, char *buf, uint bufsz)
{
- pktlist_dump(&(osh->cmn->pktlist), buf);
+ pktlist_dump(&(osh->cmn->pktlist), buf, bufsz);
return buf;
}
diff --git a/wb_regon_coordinator.c b/wb_regon_coordinator.c
index 470e9ef..171519f 100644
--- a/wb_regon_coordinator.c
+++ b/wb_regon_coordinator.c
@@ -45,6 +45,10 @@
#define DEVICE_NAME "wbrc"
#define CLASS_NAME "bcm"
+#ifndef BCM_REFERENCE
+#define BCM_REFERENCE(data) ((void)(data))
+#endif
+
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
typedef unsigned int __poll_t;
#endif
@@ -217,6 +221,7 @@ wbrc_bt_dev_write(struct file *filep, const char *buffer,
}
#ifdef WBRC_TEST
+ BCM_REFERENCE(stub_msg);
if (offset && *offset == 0xDEADFACE) {
memcpy(&msg, buffer, WBRC_MSG_LEN);
pr_err("%s: msg from wbrc stub: %x %x %x %x \n", __func__,
@@ -378,6 +383,12 @@ wbrc_bt_dev_open(struct inode *inodep, struct file *filep)
return -EFAULT;
}
+ if (!wbrc_data->wl_hdl) {
+ pr_err("%s: wl not inited !\n", __func__);
+ WBRC_UNLOCK(wbrc_mutex);
+ return -EFAULT;
+ }
+
if (wbrc_data->bt_dev_opened) {
pr_err("%s already opened\n", __func__);
WBRC_UNLOCK(wbrc_mutex);
diff --git a/wl_android.c b/wl_android.c
index a155ef9..77bffb5 100644
--- a/wl_android.c
+++ b/wl_android.c
@@ -5541,9 +5541,17 @@ wbrc2wl_wlan_on_request(void *dhd_pub)
dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY);
ret = dhd_net_bus_devreset(dev, FALSE);
if (!ret) {
- /* Keep the link in L2 */
- DHD_PRINT(("%s: calling suspend\n", __FUNCTION__));
- dhd_net_bus_suspend(dev);
+ uint32 val = 0;
+ /* Make wl up, so that minresmask is programmed */
+ DHD_PRINT(("%s: calling wl up\n", __FUNCTION__));
+ ret = wldev_ioctl_set(dev, WLC_UP, &val, sizeof(val));
+ if (unlikely(ret)) {
+ DHD_ERROR(("WLC_UP error (%d)\n", ret));
+ } else {
+ /* Keep the link in L2 */
+ DHD_PRINT(("%s: calling suspend\n", __FUNCTION__));
+ dhd_net_bus_suspend(dev);
+ }
g_wifi_on = TRUE;
} else {
/* if wlan on fails, turn it off to keep it in a sane state */
diff --git a/wl_cfg80211.c b/wl_cfg80211.c
index 758b16c..75a2b19 100644
--- a/wl_cfg80211.c
+++ b/wl_cfg80211.c
@@ -814,7 +814,7 @@ wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request
#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- bool update_ssid, u8 *mac, const u8 *mld_mac);
+ bool update_ssid, u8 *mac);
static void wl_cfg80211_work_handler(struct work_struct *work);
static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, const u8 *mac_addr,
@@ -879,6 +879,9 @@ int init_roam_cache(struct bcm_cfg80211 *cfg, int ioctl_ver);
#ifdef P2P_LISTEN_OFFLOADING
s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg);
#endif /* P2P_LISTEN_OFFLOADING */
+extern struct wireless_dev * wl_cfgp2p_if_add(struct bcm_cfg80211 *cfg, wl_iftype_t wl_iftype,
+ char const *name, u8 *mac_addr, s32 *ret_err);
+extern s32 wl_cfgp2p_if_del(struct wiphy *wiphy, struct wireless_dev *wdev);
#ifdef CUSTOMER_HW4_DEBUG
extern bool wl_scan_timeout_dbg_enabled;
@@ -2282,127 +2285,6 @@ wl_wlfc_enable(struct bcm_cfg80211 *cfg, bool enable)
#endif /* PROP_TXSTATUS_VSDB */
}
-struct wireless_dev *
-wl_cfg80211_p2p_if_add(struct bcm_cfg80211 *cfg,
- wl_iftype_t wl_iftype,
- char const *name, u8 *mac_addr, s32 *ret_err)
-{
- u16 chspec;
- s16 cfg_type;
- long timeout;
- s32 err;
- u16 p2p_iftype;
- int dhd_mode;
- struct net_device *new_ndev = NULL;
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- struct ether_addr *p2p_addr;
-
- *ret_err = BCME_OK;
- if (!cfg->p2p) {
- WL_ERR(("p2p not initialized\n"));
- return NULL;
- }
-
-#if defined(WL_CFG80211_P2P_DEV_IF)
- if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
- /* Handle Dedicated P2P discovery Interface */
- return wl_cfgp2p_add_p2p_disc_if(cfg);
- }
-#endif /* WL_CFG80211_P2P_DEV_IF */
-
- if (wl_iftype == WL_IF_TYPE_P2P_GO) {
- p2p_iftype = WL_P2P_IF_GO;
- } else {
- p2p_iftype = WL_P2P_IF_CLIENT;
- }
-
- /* Dual p2p doesn't support multiple P2PGO interfaces,
- * p2p_go_count is the counter for GO creation
- * requests.
- */
- if ((cfg->p2p->p2p_go_count > 0) && (wl_iftype == WL_IF_TYPE_P2P_GO)) {
- WL_ERR(("FW does not support multiple GO\n"));
- *ret_err = -ENOTSUPP;
- return NULL;
- }
- if (!cfg->p2p->on) {
- p2p_on(cfg) = true;
- wl_cfgp2p_set_firm_p2p(cfg);
- wl_cfgp2p_init_discovery(cfg);
- }
-
- strlcpy(cfg->p2p->vir_ifname, name, sizeof(cfg->p2p->vir_ifname));
- /* In concurrency case, STA may be already associated in a particular channel.
- * so retrieve the current channel of primary interface and then start the virtual
- * interface on that.
- */
- chspec = wl_cfg80211_get_shared_freq(wiphy);
-
- /* For P2P mode, use P2P-specific driver features to create the
- * bss: "cfg p2p_ifadd"
- */
- wl_set_p2p_status(cfg, IF_ADDING);
- bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
- cfg_type = wl_cfgp2p_get_conn_idx(cfg);
- if (cfg_type < BCME_OK) {
- wl_clr_p2p_status(cfg, IF_ADDING);
- WL_ERR(("Failed to get connection idx for p2p interface"
- ", error code = %d", cfg_type));
- return NULL;
- }
-
- p2p_addr = wl_to_p2p_bss_macaddr(cfg, cfg_type);
- memcpy(p2p_addr->octet, mac_addr, ETH_ALEN);
-
- err = wl_cfgp2p_ifadd(cfg, p2p_addr,
- htod32(p2p_iftype), chspec);
- if (unlikely(err)) {
- wl_clr_p2p_status(cfg, IF_ADDING);
- WL_ERR((" virtual iface add failed (%d) \n", err));
- return NULL;
- }
-
- /* Wait for WLC_E_IF event with IF_ADD opcode */
- timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- ((wl_get_p2p_status(cfg, IF_ADDING) == false) &&
- (cfg->if_event_info.valid)),
- msecs_to_jiffies(MAX_WAIT_TIME));
- if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
- wl_if_event_info *event = &cfg->if_event_info;
- new_ndev = wl_cfg80211_post_ifcreate(bcmcfg_to_prmry_ndev(cfg), event,
- event->mac, cfg->p2p->vir_ifname, false);
- if (unlikely(!new_ndev)) {
- goto fail;
- }
-
- if (wl_iftype == WL_IF_TYPE_P2P_GO) {
- cfg->p2p->p2p_go_count++;
- }
- /* Fill p2p specific data */
- wl_to_p2p_bss_ndev(cfg, cfg_type) = new_ndev;
- wl_to_p2p_bss_bssidx(cfg, cfg_type) = event->bssidx;
-
- WL_ERR((" virtual interface(%s) is "
- "created net attach done\n", cfg->p2p->vir_ifname));
-#if defined(BCMDONGLEHOST)
- dhd_mode = (wl_iftype == WL_IF_TYPE_P2P_GC) ?
- DHD_FLAG_P2P_GC_MODE : DHD_FLAG_P2P_GO_MODE;
- DNGL_FUNC(dhd_cfg80211_set_p2p_info, (cfg, dhd_mode));
-#endif /* defined(BCMDONGLEHOST) */
- /* reinitialize completion to clear previous count */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
- INIT_COMPLETION(cfg->iface_disable);
-#else
- init_completion(&cfg->iface_disable);
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
-
- return new_ndev->ieee80211_ptr;
- }
-
-fail:
- return NULL;
-}
-
static int
wl_iovar_fn(struct bcm_cfg80211 *cfg, struct net_device *ndev, struct preinit_iov *data)
{
@@ -2709,140 +2591,6 @@ wl_cfg80211_iface_state_ops(struct wireless_dev *wdev,
return BCME_OK;
}
-static s32
-wl_cfg80211_p2p_if_del(struct wiphy *wiphy, struct wireless_dev *wdev)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- s16 bssidx;
- s16 err;
- s32 cfg_type;
- struct net_device *ndev;
- long timeout;
- struct ether_addr p2p_dev_addr = {{0}};
-
- if (unlikely(!wl_get_drv_status(cfg, READY, bcmcfg_to_prmry_ndev(cfg)))) {
- WL_INFORM_MEM(("device is not ready\n"));
- return BCME_NOTFOUND;
- }
-
-#ifdef WL_CFG80211_P2P_DEV_IF
- if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
- /* Handle dedicated P2P discovery interface. */
- return wl_cfgp2p_del_p2p_disc_if(wdev, cfg);
- }
-#endif /* WL_CFG80211_P2P_DEV_IF */
-
- /* Handle P2P Group Interface */
- bssidx = wl_get_bssidx_by_wdev(cfg, wdev);
- if (bssidx <= 0) {
- WL_ERR(("bssidx not found\n"));
- return BCME_NOTFOUND;
- }
- if (wl_cfgp2p_find_type(cfg, bssidx, &cfg_type) != BCME_OK) {
- /* Couldn't find matching iftype */
- WL_MEM(("non P2P interface\n"));
- return BCME_NOTFOUND;
- }
-
- ndev = wdev->netdev;
- (void)memcpy_s(p2p_dev_addr.octet, ETHER_ADDR_LEN,
- ndev->dev_addr, ETHER_ADDR_LEN);
-
- wl_clr_p2p_status(cfg, GO_NEG_PHASE);
- wl_clr_p2p_status(cfg, IF_ADDING);
-
- /* for GO */
- if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
- wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
- cfg->p2p->p2p_go_count--;
- /* disable interface before bsscfg free */
- err = wl_cfgp2p_ifdisable(cfg, &p2p_dev_addr);
- /* if fw doesn't support "ifdis",
- do not wait for link down of ap mode
- */
- if (err == 0) {
- WL_ERR(("Wait for Link Down event for GO !!!\n"));
- wait_for_completion_timeout(&cfg->iface_disable,
- msecs_to_jiffies(500));
- } else if (err != BCME_UNSUPPORTED) {
- msleep(300);
- }
- } else {
- /* GC case */
- if (wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
- WL_ERR(("Wait for Link Down event for GC !\n"));
- wait_for_completion_timeout
- (&cfg->iface_disable, msecs_to_jiffies(500));
- }
-
- /* Force P2P disconnect in iface down context */
- if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
- WL_INFORM_MEM(("force send disconnect event\n"));
- CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
- wl_clr_drv_status(cfg, AUTHORIZED, ndev);
- }
- }
-
- bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
- wl_set_p2p_status(cfg, IF_DELETING);
- DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg));
-
- err = wl_cfgp2p_ifdel(cfg, &p2p_dev_addr);
- if (unlikely(err)) {
- WL_ERR(("P2P IFDEL operation failed, error code = %d\n", err));
- err = BCME_ERROR;
- goto fail;
- } else {
- /* Wait for WLC_E_IF event */
- timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- ((wl_get_p2p_status(cfg, IF_DELETING) == false) &&
- (cfg->if_event_info.valid)),
- msecs_to_jiffies(MAX_WAIT_TIME));
- if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
- cfg->if_event_info.valid) {
- WL_ERR(("P2P IFDEL operation done\n"));
- err = BCME_OK;
- } else {
- WL_ERR(("IFDEL didn't complete properly\n"));
- err = -EINVAL;
- }
- }
-
-fail:
- /* Even in failure case, attempt to remove the host data structure.
- * Firmware would be cleaned up via WiFi reset done by the
- * user space from hang event context (for android only).
- */
- bzero(cfg->p2p->vir_ifname, IFNAMSIZ);
- wl_to_p2p_bss_bssidx(cfg, cfg_type) = -1;
- wl_to_p2p_bss_ndev(cfg, cfg_type) = NULL;
- wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, cfg_type));
-
- /* Clear our saved WPS and P2P IEs for the discovery BSS */
- wl_cfg80211_clear_p2p_disc_ies(cfg);
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
- if (cfg->wiphy_lock_held) {
- schedule_delayed_work(&cfg->remove_iface_work, 0);
- } else
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
- {
- WL_DBG_MEM(("Did not Schedule remove iface work\n"));
-#ifdef BCMDONGLEHOST
- dhd_net_if_lock(ndev);
-#endif /* BCMDONGLEHOST */
- if (cfg->if_event_info.ifidx) {
- /* Remove interface except for primary ifidx */
- wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev, FALSE);
- }
-#ifdef BCMDONGLEHOST
- dhd_net_if_unlock(ndev);
-#endif /* BCMDONGLEHOST */
- }
-
- return err;
-}
-
static struct wireless_dev *
wl_cfg80211_add_monitor_if(struct wiphy *wiphy, const char *name)
{
@@ -3070,7 +2818,7 @@ _wl_cfg80211_add_if(struct bcm_cfg80211 *cfg,
/* falls through */
case WL_IF_TYPE_P2P_GC:
if (cfg->p2p_supported) {
- wdev = wl_cfg80211_p2p_if_add(cfg, wl_iftype,
+ wdev = wl_cfgp2p_if_add(cfg, wl_iftype,
name, mac_addr, &err);
break;
}
@@ -3228,7 +2976,7 @@ _wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
* so netinfo list may not have any node corresponding to
* discovery I/F. Handle it before bssidx check.
*/
- ret = wl_cfg80211_p2p_if_del(wiphy, wdev);
+ ret = wl_cfgp2p_if_del(wiphy, wdev);
if (unlikely(ret)) {
goto exit;
} else {
@@ -4559,7 +4307,7 @@ wl_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
}
/* Handle p2p iface */
- if ((ret = wl_cfg80211_p2p_if_del(wiphy, wdev)) != BCME_NOTFOUND) {
+ if ((ret = wl_cfgp2p_if_del(wiphy, wdev)) != BCME_NOTFOUND) {
WL_DBG(("P2P iface del handled \n"));
#ifdef SUPPORT_SET_CAC
wl_cfg80211_set_cac(cfg, 1);
@@ -6804,6 +6552,7 @@ wl_cfg80211_ml_link_del(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
s32 i, j;
s32 ret = BCME_OK;
wl_mlo_link_t *ml_link = NULL;
+ unsigned long flags;
info = (wl_mlo_link_info_event_v1_t *)data;
WL_DBG_MEM(("ver:%d len:%d op_code:%d role:%d num_links:%d\n",
@@ -6819,8 +6568,9 @@ wl_cfg80211_ml_link_del(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
return BCME_ERROR;
}
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
/* All events are received on MLD interface */
- mld_netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
+ mld_netinfo = _wl_get_netinfo_by_wdev(cfg, wdev);
if (!mld_netinfo) {
WL_ERR(("ml netinfo not found\n"));
ret = BCME_ERROR;
@@ -6829,7 +6579,8 @@ wl_cfg80211_ml_link_del(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
if (!mld_netinfo->mlinfo.num_links) {
WL_INFORM(("no ml links\n"));
- return BCME_OK;
+ ret = BCME_OK;
+ goto exit;
}
if (info->num_links > mld_netinfo->mlinfo.num_links) {
@@ -6863,6 +6614,7 @@ wl_cfg80211_ml_link_del(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
}
exit:
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
if (ret) {
/* Failure not expected. Trigger hangevent for wifi recovery */
#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
@@ -8033,6 +7785,11 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
err = -EINVAL;
goto fail;
}
+ if (wl_get_drv_status_all(cfg, AP_CREATING)) {
+ WL_ERR(("AP creates in progress, so skip this connection for creating AP.\n"));
+ err = -EBUSY;
+ goto fail;
+ }
#endif /* WL_DUAL_STA */
bzero(&assoc_info, sizeof(wlcfg_assoc_info_t));
if ((assoc_info.bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
@@ -9385,7 +9142,7 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
}
link_idx = wl_cfg80211_get_link_idx_by_bssid(cfg, dev, mac);
- WL_INFORM_MEM(("query bssid:"MACDBG" link_idx:%d\n", MAC2STRDBG(mac), link_idx));
+ WL_DBG(("query bssid:"MACDBG" link_idx:%d\n", MAC2STRDBG(mac), link_idx));
buf = MALLOC(cfg->osh, WLC_IOCTL_MEDLEN);
if (buf == NULL) {
@@ -13827,7 +13584,7 @@ wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
MAC2STRDBG((const u8 *)&e->addr)));
wl_get_assoc_ies(cfg, ndev);
wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
- wl_update_bss_info(cfg, ndev, false, NULL, NULL);
+ wl_update_bss_info(cfg, ndev, false, NULL);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL);
#else
@@ -13841,7 +13598,7 @@ wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
wl_link_up(cfg);
wl_get_assoc_ies(cfg, ndev);
wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
- wl_update_bss_info(cfg, ndev, false, NULL, NULL);
+ wl_update_bss_info(cfg, ndev, false, NULL);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL);
#else
@@ -15255,52 +15012,6 @@ wl_cfgvendor_advlog_connect_event(wl_assoc_status_t *as, bool query_rssi, int pr
}
}
-void
-wl_cfgvendor_advlog_disassoc_tx(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- uint32 reason, int rssi)
-{
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- wl_assoc_status_t as;
- s32 ifidx = DHD_BAD_IF;
- u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
- struct ether_addr fw_bssid;
- int err;
-
- /* In DEAUTH_IND or Beacon loss cases, we already lost contact */
- bzero(&fw_bssid, sizeof(fw_bssid));
- err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &fw_bssid, ETHER_ADDR_LEN);
- if (err) {
- WL_ERR(("not inform disassoc for already disconnected\n"));
- return;
- }
-
- if (!curbssid) {
- WL_ERR(("No bssid found\n"));
- return;
- }
-
- ifidx = dhd_net2idx(dhdp->info, ndev);
- /* Advanced Logging supports only STA mode */
- if (!DHD_IF_ROLE_STA(dhdp, ifidx)) {
- return;
- }
-
- bzero(&as, sizeof(wl_assoc_status_t));
- as.ndev = ndev;
- if (memcpy_s(as.addr, ETH_ALEN, curbssid, ETH_ALEN)) {
- WL_ERR(("failed to memcpy bssid\n"));
- return;
- }
-
- /* Nomally, FW sends WLC_E_DISASSOC event twice
- * to avoid printing twice, move it in WLC_DISASSOC sending path
- * Set WLC_E_DISASSOC forcely instead of WLC_DISASSOC
- */
- as.event_type = WLC_E_DISASSOC;
- as.reason = reason;
- wl_cfgvendor_advlog_connect_event(&as, FALSE, rssi);
-}
-
static s32
wl_cfgvendor_advlog_get_target_rssi(struct bcm_cfg80211 *cfg, struct net_device *ndev,
int *rssi)
@@ -16358,7 +16069,7 @@ static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev)
}
static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- bool update_ssid, u8 *target_bssid, const u8 *mld_mac)
+ bool update_ssid, u8 *target_bssid)
{
struct cfg80211_bss *bss;
wl_bss_info_v109_t *bi;
@@ -16374,114 +16085,150 @@ static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
char *buf;
u32 freq;
chanspec_t chspec = INVCHANSPEC;
+ u8 num_links = 1;
+ bool ml_conn = FALSE;
+ u8 index;
+ u8 link_idx = NON_ML_LINK;
+#ifdef WL_MLO
+ struct net_info *mld_netinfo = NULL;
+ wl_mlo_link_info_t mlinfo;
+ wl_mlo_link_t *linkinfo = NULL;
+ unsigned long flags;
+#endif /* WL_MLO */
wiphy = bcmcfg_to_wiphy(cfg);
ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
- if (mld_mac) {
- WL_INFORM_MEM(("check for existing mld entry\n"));
- curbssid = mld_mac;
- } else if (target_bssid) {
- WL_INFORM_MEM(("Update bssinfo for target bssid\n"));
- curbssid = target_bssid;
- } else {
- WL_INFORM_MEM(("Update bssinfo for ASSOCIATED bssid\n"));
- curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
- }
- bss = CFG80211_GET_BSS(wiphy, NULL, curbssid,
- ssid->SSID, ssid->SSID_len);
+
buf = (char *)MALLOCZ(cfg->osh, WL_EXTRA_BUF_MAX);
if (!buf) {
WL_ERR(("buffer alloc failed.\n"));
return BCME_NOMEM;
}
- mutex_lock(&cfg->usr_sync);
- *(u32 *)buf = htod32(WL_EXTRA_BUF_MAX);
- if (target_bssid) {
- err = wldev_iovar_getbuf(ndev, "target_bss_info", NULL, 0,
- buf, WL_EXTRA_BUF_MAX, NULL);
- } else {
- err = wldev_ioctl_get(ndev, WLC_GET_BSS_INFO, buf, WL_EXTRA_BUF_MAX);
- }
- if (unlikely(err)) {
- WL_ERR(("Could not get bss info %d\n", err));
- goto update_bss_info_out;
- }
- bi = (wl_bss_info_v109_t *)(buf + 4);
- chspec = wl_chspec_driver_to_host(bi->chanspec);
- /* chanspec queried for ASSOCIATED BSSID needs to be valid */
- if (!(target_bssid) && !wf_chspec_valid(chspec)) {
- WL_ERR(("Invalid chanspec from get bss info %x\n", chspec));
- err = BCME_BADCHAN;
- goto update_bss_info_out;
+
+#ifdef WL_MLO
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ mld_netinfo = _wl_get_netinfo_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (mld_netinfo && mld_netinfo->mlinfo.num_links) {
+ /* copy for local use */
+ (void)memcpy_s(&mlinfo, sizeof(mlinfo), &mld_netinfo->mlinfo, sizeof(mlinfo));
+ ml_conn = TRUE;
+ num_links = mld_netinfo->mlinfo.num_links;
}
- wl_update_prof(cfg, ndev, NULL, &chspec, WL_PROF_CHAN);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+#endif /* WL_MLO */
- if (!bss) {
- if (mld_mac) {
- if (memcpy_s(bi->BSSID.octet, ETHER_ADDR_LEN,
- mld_mac, ETHER_ADDR_LEN)) {
- WL_ERR(("Bssid copy failed\n"));
- err = -EIO;
- goto update_bss_info_out;
- }
- } else if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
- WL_ERR(("Bssid doesn't match. curbssid:"MACDBG" bi->BSSID:"MACDBG"\n",
- MAC2STRDBG(curbssid), MAC2STRDBG(bi->BSSID.octet)));
- err = -EIO;
- goto update_bss_info_out;
+ mutex_lock(&cfg->usr_sync);
+ for (index = 0; index < num_links; index++) {
+ /*
+		 * The target bssid takes priority as it is needed in the auth case.
+		 * Relying on ml link info does not help as it does not contain the peer link addr.
+		 * Even relying on WL_MLO_LINK_INFO_OPCODE_UPDATE does not help at this
+		 * point, as even FW does not have the peer link addr populated.
+ */
+ if (target_bssid) {
+ WL_INFORM_MEM(("Update bssinfo for target bssid\n"));
+ curbssid = target_bssid;
+ } else if (ml_conn) {
+#ifdef WL_MLO
+ linkinfo = &mlinfo.links[index];
+ curbssid = linkinfo->peer_link_addr;
+ link_idx = linkinfo->link_idx;
+ WL_INFORM_MEM(("Update bssinfo for linkidx %d peer_link_addr: "MACDBG"\n",
+ link_idx, MAC2STRDBG(curbssid)));
+#endif /* WL_MLO */
+ } else {
+ WL_INFORM_MEM(("Update bssinfo for ASSOCIATED bssid\n"));
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ }
+ bss = CFG80211_GET_BSS(wiphy, NULL, curbssid,
+ ssid->SSID, ssid->SSID_len);
+
+ *(u32 *)buf = htod32(WL_EXTRA_BUF_MAX);
+ if (target_bssid) {
+ err = wldev_iovar_getbuf(ndev, "target_bss_info", NULL, 0,
+ buf, WL_EXTRA_BUF_MAX, NULL);
+ } else {
+ err = wldev_link_ioctl_get(ndev, link_idx, WLC_GET_BSS_INFO,
+ buf, WL_EXTRA_BUF_MAX);
}
- err = wl_inform_single_bss(cfg, bi, update_ssid);
if (unlikely(err)) {
- WL_ERR(("Could not update the AP detail in cache\n"));
+ WL_ERR(("Could not get bss info %d\n", err));
+ goto update_bss_info_out;
+ }
+ bi = (wl_bss_info_v109_t *)(buf + 4);
+ chspec = wl_chspec_driver_to_host(bi->chanspec);
+ WL_INFORM_MEM(("link_idx %d chanspec %x\n", link_idx, chspec));
+ /* chanspec queried for ASSOCIATED BSSID needs to be valid */
+ if (!(target_bssid) && !wf_chspec_valid(chspec)) {
+ WL_ERR(("Invalid chanspec from get bss info %x\n", chspec));
+ err = BCME_BADCHAN;
goto update_bss_info_out;
}
- WL_INFORM_MEM(("Updated the AP " MACDBG " detail in cache\n",
- MAC2STRDBG(curbssid)));
- ie = ((u8 *)bi) + bi->ie_offset;
- ie_len = bi->ie_length;
- beacon_interval = cpu_to_le16(bi->beacon_period);
- } else {
- u16 channel;
- WL_INFORM_MEM(("Found AP in the cache - BSSID " MACDBG "\n",
- MAC2STRDBG(bss->bssid)));
- channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
- freq = wl_channel_to_frequency(channel, CHSPEC_BAND(bi->chanspec));
- bss->channel = ieee80211_get_channel(wiphy, freq);
+ if (!bss) {
+ if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
+ WL_ERR(("Bssid doesn't match."
+ " curbssid:"MACDBG" bi->BSSID:"MACDBG"\n",
+ MAC2STRDBG(curbssid), MAC2STRDBG(bi->BSSID.octet)));
+ err = -EIO;
+ goto update_bss_info_out;
+ }
+ err = wl_inform_single_bss(cfg, bi, update_ssid);
+ if (unlikely(err)) {
+ WL_ERR(("Could not update the AP detail in cache\n"));
+ goto update_bss_info_out;
+ }
+
+ WL_INFORM_MEM(("Updated the AP " MACDBG " detail in cache\n",
+ MAC2STRDBG(curbssid)));
+ ie = ((u8 *)bi) + bi->ie_offset;
+ ie_len = bi->ie_length;
+ beacon_interval = cpu_to_le16(bi->beacon_period);
+ } else {
+ u16 channel;
+ WL_INFORM_MEM(("Found AP in the cache - BSSID " MACDBG "\n",
+ MAC2STRDBG(bss->bssid)));
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+ freq = wl_channel_to_frequency(channel, CHSPEC_BAND(bi->chanspec));
+ bss->channel = ieee80211_get_channel(wiphy, freq);
#if defined(WL_CFG80211_P2P_DEV_IF)
- ie = (const u8 *)bss->ies->data;
- ie_len = bss->ies->len;
+ ie = (const u8 *)bss->ies->data;
+ ie_len = bss->ies->len;
#else
- ie = bss->information_elements;
- ie_len = bss->len_information_elements;
+ ie = bss->information_elements;
+ ie_len = bss->len_information_elements;
#endif /* WL_CFG80211_P2P_DEV_IF */
- beacon_interval = bss->beacon_interval;
+ beacon_interval = bss->beacon_interval;
- CFG80211_PUT_BSS(wiphy, bss);
- }
+ CFG80211_PUT_BSS(wiphy, bss);
+ }
- tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
- if (tim) {
- dtim_period = *(tim->data + 1);
- } else {
- /*
- * active scan was done so we could not get dtim
- * information out of probe response.
- * so we speficially query dtim information.
- */
- dtim_period = 0;
- err = wldev_ioctl_get(ndev, WLC_GET_DTIMPRD,
- &dtim_period, sizeof(dtim_period));
- if (unlikely(err)) {
- WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
- goto update_bss_info_out;
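+	/* Parse the TIM IE / query DTIM and update host profile info only for link 0 or a non-MLO association */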
+ if ((link_idx == 0) || (link_idx == NON_ML_LINK)) {
+ tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+ if (tim) {
+ dtim_period = *(tim->data + 1);
+ } else {
+ /*
+ * active scan was done so we could not get dtim
+ * information out of probe response.
+			 * so we specifically query dtim information.
+ */
+ dtim_period = 0;
+ err = wldev_ioctl_get(ndev, WLC_GET_DTIMPRD,
+ &dtim_period, sizeof(dtim_period));
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+ goto update_bss_info_out;
+ }
+ }
+
+ wl_update_prof(cfg, ndev, NULL, &chspec, WL_PROF_CHAN);
+ wl_update_prof(cfg, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
+ wl_update_prof(cfg, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
}
}
- wl_update_prof(cfg, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
- wl_update_prof(cfg, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
-
update_bss_info_out:
if (unlikely(err)) {
WL_ERR(("Failed with error %d\n", err));
@@ -16578,7 +16325,7 @@ wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
}
curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
- if ((err = wl_update_bss_info(cfg, ndev, true, NULL, NULL)) != BCME_OK) {
+ if ((err = wl_update_bss_info(cfg, ndev, true, NULL)) != BCME_OK) {
WL_ERR(("failed to update bss info, err=%d\n", err));
goto fail;
}
@@ -16891,7 +16638,7 @@ wl_fillup_conn_resp_params(struct bcm_cfg80211 *cfg, struct net_device *ndev,
link->peer_link_addr,
ssid->SSID, ssid->SSID_len);
resp_params->valid_links |= BIT(link_id);
- WL_INFORM_MEM(("peer_link_addr:" MACDBG " link_addr:" MACDBG " index:%d\n",
+		WL_INFORM_MEM(("peer_link_addr:" MACDBG " link_addr:" MACDBG " link_id:%d\n",
MAC2STRDBG((const u8*)(link->peer_link_addr)),
MAC2STRDBG((const u8*)(link->link_addr)), link_id));
if (!resp_params->links[link_id].bss && (status == WLAN_STATUS_SUCCESS)) {
@@ -17029,7 +16776,7 @@ wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
* For cases, there is no match available,
* need to update the cache based on bss info from fw.
*/
- if ((err = wl_update_bss_info(cfg, ndev, true, NULL, NULL)) != BCME_OK) {
+ if ((err = wl_update_bss_info(cfg, ndev, true, NULL)) != BCME_OK) {
WL_ERR(("failed to update bss info, err=%d\n", err));
goto exit;
}
@@ -20735,7 +20482,8 @@ static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg)
ret = wldev_iovar_getbuf(ndev, "scan_ver", NULL, 0, ioctl_buf, sizeof(ioctl_buf), NULL);
if (ret == BCME_OK) {
wl_scan_version_t *ver = (wl_scan_version_t *)ioctl_buf;
- if ((ver->scan_ver_major == SCAN_PARAMS_VER_3) ||
+ if ((ver->scan_ver_major == SCAN_PARAMS_VER_4) ||
+ (ver->scan_ver_major == SCAN_PARAMS_VER_3) ||
(ver->scan_ver_major == SCAN_PARAMS_VER_2)) {
/* v2 and v3 structs are of same size just that a pad variable
* has been changed to ssid type for 6G use. That will variable
@@ -26846,13 +26594,12 @@ wl_notify_start_auth(struct bcm_cfg80211 *cfg,
* kernel is ML aware.
*/
WL_INFORM_MEM(("[MLO] Create MLD bss entry\n"));
- wl_update_bss_info(cfg, ndev, false, evt_data->bssid.octet,
- e->addr.octet);
+ wl_update_bss_info(cfg, ndev, false, evt_data->bssid.octet);
} else
#endif /* WL_MLO */
if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
/* Make sure bss_info is updated in roam case */
- wl_update_bss_info(cfg, ndev, false, evt_data->bssid.octet, NULL);
+ wl_update_bss_info(cfg, ndev, false, evt_data->bssid.octet);
}
ext_auth_param.ssid.ssid_len = MIN(evt_data->ssid.SSID_len, DOT11_MAX_SSID_LEN);
diff --git a/wl_cfg80211.h b/wl_cfg80211.h
index b818b09..eea7d29 100644
--- a/wl_cfg80211.h
+++ b/wl_cfg80211.h
@@ -3448,9 +3448,9 @@ extern s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct bcm_cfg80211 *cf
extern s32 wl_cfg80211_resume_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg);
#endif
+#define CHANINFO_LIST_BUF_SIZE (1024 * 4)
#ifdef WL_SUPPORT_AUTO_CHANNEL
#define CHANSPEC_BUF_SIZE 2048
-#define CHANINFO_LIST_BUF_SIZE (1024 * 4)
#define CHAN_SEL_IOCTL_DELAY 300
#define CHAN_SEL_RETRY_COUNT 15
#define CHANNEL_IS_RADAR(channel) (((channel & WL_CHAN_RADAR) || \
diff --git a/wl_cfgp2p.c b/wl_cfgp2p.c
index f137ae4..bb03815 100644
--- a/wl_cfgp2p.c
+++ b/wl_cfgp2p.c
@@ -62,6 +62,30 @@
#include <dhd_bus.h>
#endif /* defined(BCMDONGLEHOST) */
+#define MAX_WAIT_TIME 1500
+
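+/* For non-dongle builds the byte-order macros and DNGL_FUNC are no-ops; for BCMDONGLEHOST builds DNGL_FUNC expands to the actual call */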
+#if !defined(BCMDONGLEHOST)
+#ifdef ntoh32
+#undef ntoh32
+#endif
+#ifdef ntoh16
+#undef ntoh16
+#endif
+#ifdef htod32
+#undef htod32
+#endif
+#ifdef htod16
+#undef htod16
+#endif
+#define ntoh32(i) (i)
+#define ntoh16(i) (i)
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define DNGL_FUNC(func, parameters)
+#else
+#define DNGL_FUNC(func, parameters) func parameters
+#endif /* defined(BCMDONGLEHOST) */
+
static s8 scanparambuf[WLC_IOCTL_MEDLEN];
static bool wl_cfgp2p_has_ie(const bcm_tlv_t *ie, const u8 **tlvs, u32 *tlvs_len,
const u8 *oui, u32 oui_len, u8 type);
@@ -995,6 +1019,7 @@ wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active_sca
wl_p2p_scan_t *p2p_params;
wl_escan_params_v1_t *eparams;
wl_escan_params_v3_t *eparams_v3;
+ wl_escan_params_v4_t *eparams_v4;
wlc_ssid_t ssid;
u32 sync_id = 0;
s32 nprobes = 0;
@@ -1005,7 +1030,11 @@ wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active_sca
pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
/* Allocate scan params which need space for 3 channels and 0 ssids */
- if (IS_SCAN_PARAMS_V3_V2(cfg)) {
+ if (IS_SCAN_PARAMS_V4(cfg)) {
+ eparams_size = (WL_SCAN_PARAMS_V4_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_v4_t, params)) +
+ num_chans * sizeof(eparams->params.channel_list[0]);
+ } else if (IS_SCAN_PARAMS_V3_V2(cfg)) {
eparams_size = (WL_SCAN_PARAMS_V3_FIXED_SIZE +
OFFSETOF(wl_escan_params_v3_t, params)) +
num_chans * sizeof(eparams->params.channel_list[0]);
@@ -1095,7 +1124,32 @@ wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active_sca
wl_escan_set_sync_id(sync_id, cfg);
/* Fill in the Scan structure that follows the P2P scan structure */
- if (IS_SCAN_PARAMS_V3_V2(cfg)) {
+ if (IS_SCAN_PARAMS_V4(cfg)) {
+ eparams_v4 = (wl_escan_params_v4_t*) (p2p_params + 1);
+ eparams_v4->version = htod16(cfg->scan_params_ver);
+ eparams_v4->action = htod16(action);
+ eparams_v4->params.version = htod16(cfg->scan_params_ver);
+ eparams_v4->params.length = htod16(sizeof(wl_scan_params_v4_t));
+ eparams_v4->params.bss_type = DOT11_BSSTYPE_ANY;
+ eparams_v4->params.scan_type = htod32(scan_type);
+ eparams_v4->params.scan_type_ext = 0;
+ (void)memcpy_s(&eparams_v4->params.bssid, ETHER_ADDR_LEN, mac_addr, ETHER_ADDR_LEN);
+ eparams_v4->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+ eparams_v4->params.active_time = htod32(active_time);
+ eparams_v4->params.nprobes = htod32(nprobes);
+ eparams_v4->params.passive_time = htod32(-1);
+ eparams_v4->sync_id = sync_id;
+ for (i = 0; i < num_chans; i++) {
+ eparams_v4->params.channel_list[i] =
+ wl_chspec_host_to_driver(channels[i]);
+ }
+ eparams_v4->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+ if (ssid.SSID_len)
+ (void)memcpy_s(&eparams_v4->params.ssid,
+ sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
+ sync_id = eparams_v4->sync_id;
+ } else if (IS_SCAN_PARAMS_V3_V2(cfg)) {
eparams_v3 = (wl_escan_params_v3_t*) (p2p_params + 1);
eparams_v3->version = htod16(cfg->scan_params_ver);
eparams_v3->action = htod16(action);
@@ -2918,3 +2972,257 @@ wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request)
}
return false;
}
+
+struct wireless_dev *
+wl_cfgp2p_if_add(struct bcm_cfg80211 *cfg, wl_iftype_t wl_iftype,
+ char const *name, u8 *mac_addr, s32 *ret_err)
+{
+ u16 chspec;
+ s16 cfg_type;
+ long timeout;
+ s32 err;
+ u16 p2p_iftype;
+ int dhd_mode;
+ struct net_device *new_ndev = NULL;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct ether_addr *p2p_addr;
+
+ *ret_err = BCME_OK;
+ if (!cfg->p2p) {
+ WL_ERR(("p2p not initialized\n"));
+ return NULL;
+ }
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ /* Handle Dedicated P2P discovery Interface */
+ return wl_cfgp2p_add_p2p_disc_if(cfg);
+ }
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+ if (wl_iftype == WL_IF_TYPE_P2P_GO) {
+ p2p_iftype = WL_P2P_IF_GO;
+ } else {
+ p2p_iftype = WL_P2P_IF_CLIENT;
+ }
+
+ /* Dual p2p doesn't support multiple P2PGO interfaces,
+ * p2p_go_count is the counter for GO creation
+ * requests.
+ */
+ if ((cfg->p2p->p2p_go_count > 0) && (wl_iftype == WL_IF_TYPE_P2P_GO)) {
+ WL_ERR(("FW does not support multiple GO\n"));
+ *ret_err = -ENOTSUPP;
+ return NULL;
+ }
+ if (!cfg->p2p->on) {
+ p2p_on(cfg) = true;
+ wl_cfgp2p_set_firm_p2p(cfg);
+ wl_cfgp2p_init_discovery(cfg);
+ }
+
+ strlcpy(cfg->p2p->vir_ifname, name, sizeof(cfg->p2p->vir_ifname));
+ /* In concurrency case, STA may be already associated in a particular channel.
+ * so retrieve the current channel of primary interface and then start the virtual
+ * interface on that.
+ */
+ chspec = wl_cfg80211_get_shared_freq(wiphy);
+
+ /* For P2P mode, use P2P-specific driver features to create the
+ * bss: "cfg p2p_ifadd"
+ */
+ wl_set_p2p_status(cfg, IF_ADDING);
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
+ cfg_type = wl_cfgp2p_get_conn_idx(cfg);
+ if (cfg_type < BCME_OK) {
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ WL_ERR(("Failed to get connection idx for p2p interface"
+ ", error code = %d", cfg_type));
+ return NULL;
+ }
+
+ p2p_addr = wl_to_p2p_bss_macaddr(cfg, cfg_type);
+ (void)memcpy_s(p2p_addr->octet, ETH_ALEN, mac_addr, ETH_ALEN);
+
+ err = wl_cfgp2p_ifadd(cfg, p2p_addr,
+ htod32(p2p_iftype), chspec);
+ if (unlikely(err)) {
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ WL_ERR((" virtual iface add failed (%d) \n", err));
+ return NULL;
+ }
+
+ /* Wait for WLC_E_IF event with IF_ADD opcode */
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ ((wl_get_p2p_status(cfg, IF_ADDING) == false) &&
+ (cfg->if_event_info.valid)),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
+ wl_if_event_info *event = &cfg->if_event_info;
+ new_ndev = wl_cfg80211_post_ifcreate(bcmcfg_to_prmry_ndev(cfg), event,
+ event->mac, cfg->p2p->vir_ifname, false);
+ if (unlikely(!new_ndev)) {
+ goto fail;
+ }
+
+ if (wl_iftype == WL_IF_TYPE_P2P_GO) {
+ cfg->p2p->p2p_go_count++;
+ }
+ /* Fill p2p specific data */
+ wl_to_p2p_bss_ndev(cfg, cfg_type) = new_ndev;
+ wl_to_p2p_bss_bssidx(cfg, cfg_type) = event->bssidx;
+
+		WL_ERR((" virtual interface(%s) is "
+			"created, net attach done\n", cfg->p2p->vir_ifname));
+#if defined(BCMDONGLEHOST)
+ dhd_mode = (wl_iftype == WL_IF_TYPE_P2P_GC) ?
+ DHD_FLAG_P2P_GC_MODE : DHD_FLAG_P2P_GO_MODE;
+ DNGL_FUNC(dhd_cfg80211_set_p2p_info, (cfg, dhd_mode));
+#endif /* defined(BCMDONGLEHOST) */
+ /* reinitialize completion to clear previous count */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ INIT_COMPLETION(cfg->iface_disable);
+#else
+ init_completion(&cfg->iface_disable);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
+
+ return new_ndev->ieee80211_ptr;
+ }
+
+fail:
+ return NULL;
+}
+
+s32
+wl_cfgp2p_if_del(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s16 bssidx;
+ s16 err;
+ s32 cfg_type;
+ struct net_device *ndev;
+ long timeout;
+ struct ether_addr p2p_dev_addr = {{0}};
+
+ if (unlikely(!wl_get_drv_status(cfg, READY, bcmcfg_to_prmry_ndev(cfg)))) {
+ WL_INFORM_MEM(("device is not ready\n"));
+ return BCME_NOTFOUND;
+ }
+
+#ifdef WL_CFG80211_P2P_DEV_IF
+ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+ /* Handle dedicated P2P discovery interface. */
+ return wl_cfgp2p_del_p2p_disc_if(wdev, cfg);
+ }
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+ /* Handle P2P Group Interface */
+ bssidx = wl_get_bssidx_by_wdev(cfg, wdev);
+ if (bssidx <= 0) {
+ WL_ERR(("bssidx not found\n"));
+ return BCME_NOTFOUND;
+ }
+ if (wl_cfgp2p_find_type(cfg, bssidx, &cfg_type) != BCME_OK) {
+ /* Couldn't find matching iftype */
+ WL_MEM(("non P2P interface\n"));
+ return BCME_NOTFOUND;
+ }
+
+ ndev = wdev->netdev;
+ (void)memcpy_s(p2p_dev_addr.octet, ETHER_ADDR_LEN,
+ ndev->dev_addr, ETHER_ADDR_LEN);
+
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+ wl_clr_p2p_status(cfg, IF_ADDING);
+
+ /* for GO */
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
+ cfg->p2p->p2p_go_count--;
+ /* disable interface before bsscfg free */
+ err = wl_cfgp2p_ifdisable(cfg, &p2p_dev_addr);
+ /* if fw doesn't support "ifdis",
+ do not wait for link down of ap mode
+ */
+ if (err == 0) {
+ WL_ERR(("Wait for Link Down event for GO !!!\n"));
+ wait_for_completion_timeout(&cfg->iface_disable,
+ msecs_to_jiffies(500));
+ } else if (err != BCME_UNSUPPORTED) {
+ msleep(300);
+ }
+ } else {
+ /* GC case */
+ if (wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+ WL_ERR(("Wait for Link Down event for GC !\n"));
+ wait_for_completion_timeout
+ (&cfg->iface_disable, msecs_to_jiffies(500));
+ }
+
+ /* Force P2P disconnect in iface down context */
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ WL_INFORM_MEM(("force send disconnect event\n"));
+ CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
+ wl_clr_drv_status(cfg, AUTHORIZED, ndev);
+ }
+ }
+
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
+ wl_set_p2p_status(cfg, IF_DELETING);
+ DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg));
+
+ err = wl_cfgp2p_ifdel(cfg, &p2p_dev_addr);
+ if (unlikely(err)) {
+ WL_ERR(("P2P IFDEL operation failed, error code = %d\n", err));
+ err = BCME_ERROR;
+ goto fail;
+ } else {
+ /* Wait for WLC_E_IF event */
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ ((wl_get_p2p_status(cfg, IF_DELETING) == false) &&
+ (cfg->if_event_info.valid)),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
+ cfg->if_event_info.valid) {
+ WL_ERR(("P2P IFDEL operation done\n"));
+ err = BCME_OK;
+ } else {
+ WL_ERR(("IFDEL didn't complete properly\n"));
+ err = -EINVAL;
+ }
+ }
+
+fail:
+ /* Even in failure case, attempt to remove the host data structure.
+ * Firmware would be cleaned up via WiFi reset done by the
+ * user space from hang event context (for android only).
+ */
+ bzero(cfg->p2p->vir_ifname, IFNAMSIZ);
+ wl_to_p2p_bss_bssidx(cfg, cfg_type) = -1;
+ wl_to_p2p_bss_ndev(cfg, cfg_type) = NULL;
+ wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, cfg_type));
+
+ /* Clear our saved WPS and P2P IEs for the discovery BSS */
+ wl_cfg80211_clear_p2p_disc_ies(cfg);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ if (cfg->wiphy_lock_held) {
+ schedule_delayed_work(&cfg->remove_iface_work, 0);
+ } else
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+ {
+ WL_DBG_MEM(("Did not Schedule remove iface work\n"));
+#ifdef BCMDONGLEHOST
+ dhd_net_if_lock(ndev);
+#endif /* BCMDONGLEHOST */
+ if (cfg->if_event_info.ifidx) {
+ /* Remove interface except for primary ifidx */
+ wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev, FALSE);
+ }
+#ifdef BCMDONGLEHOST
+ dhd_net_if_unlock(ndev);
+#endif /* BCMDONGLEHOST */
+ }
+
+ return err;
+}
diff --git a/wl_cfgscan.c b/wl_cfgscan.c
index ed8368d..cb0b476 100644
--- a/wl_cfgscan.c
+++ b/wl_cfgscan.c
@@ -1735,6 +1735,7 @@ wl_scan_prep(struct bcm_cfg80211 *cfg, void *scan_params, u32 len,
{
wl_scan_params_v1_t *params = NULL;
wl_scan_params_v3_t *params_v3 = NULL;
+ wl_scan_params_v4_t *params_v4 = NULL;
u32 scan_type = 0;
u32 scan_param_size = 0;
u32 n_channels = 0;
@@ -1753,7 +1754,11 @@ wl_scan_prep(struct bcm_cfg80211 *cfg, void *scan_params, u32 len,
}
WL_DBG(("Preparing Scan request\n"));
- if (IS_SCAN_PARAMS_V3_V2(cfg)) {
+ if (IS_SCAN_PARAMS_V4(cfg)) {
+ params_v4 = (wl_scan_params_v4_t *)scan_params;
+ scan_param_size = sizeof(wl_scan_params_v4_t);
+ channel_offset = offsetof(wl_scan_params_v4_t, channel_list);
+ } else if (IS_SCAN_PARAMS_V3_V2(cfg)) {
params_v3 = (wl_scan_params_v3_t *)scan_params;
scan_param_size = sizeof(wl_scan_params_v3_t);
channel_offset = offsetof(wl_scan_params_v3_t, channel_list);
@@ -1763,7 +1768,33 @@ wl_scan_prep(struct bcm_cfg80211 *cfg, void *scan_params, u32 len,
channel_offset = offsetof(wl_scan_params_v1_t, channel_list);
}
- if (params_v3) {
+ if (params_v4) {
+ /* scan params ver4 */
+#if defined(WL_SCAN_TYPE)
+ scan_type += wl_cfgscan_map_nl80211_scan_type(cfg, request);
+#endif /* WL_SCAN_TYPE */
+ (void)memcpy_s(&params_v4->bssid, ETHER_ADDR_LEN, &ether_bcast, ETHER_ADDR_LEN);
+ params_v4->version = htod16(cfg->scan_params_ver);
+ params_v4->length = htod16(sizeof(wl_scan_params_v4_t));
+ params_v4->bss_type = DOT11_BSSTYPE_ANY;
+ params_v4->scan_type = htod32(scan_type);
+ params_v4->scan_type_ext = 0;
+ params_v4->nprobes = htod32(-1);
+ params_v4->active_time = htod32(-1);
+ params_v4->passive_time = htod32(-1);
+ params_v4->home_time = htod32(-1);
+ params_v4->channel_num = 0;
+ bzero(&params_v4->ssid, sizeof(wlc_ssid_t));
+ chan_list = params_v4->channel_list;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) && defined(DHD_SCAN_INC_RNR)
+ /* scan for colocated APs reported by 2.4/5 GHz APs */
+ if (request->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ)
+#endif
+ {
+ params_v4->ssid_type |= WL_SCAN_INC_RNR;
+ }
+ } else if (params_v3) {
/* scan params ver3 */
#if defined(WL_SCAN_TYPE)
scan_type += wl_cfgscan_map_nl80211_scan_type(cfg, request);
@@ -1840,7 +1871,14 @@ wl_scan_prep(struct bcm_cfg80211 *cfg, void *scan_params, u32 len,
u32 channel_num =
htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
(n_channels & WL_SCAN_PARAMS_COUNT_MASK));
- if (params_v3) {
+ if (params_v4) {
+ params_v4->channel_num = channel_num;
+ if (n_channels == 1) {
+ params_v4->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+ params_v4->nprobes = htod32(
+ params_v4->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+ }
+ } else if (params_v3) {
params_v3->channel_num = channel_num;
if (n_channels == 1) {
params_v3->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
@@ -1929,6 +1967,7 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
s32 params_size;
wl_escan_params_v1_t *eparams = NULL;
wl_escan_params_v3_t *eparams_v3 = NULL;
+ wl_escan_params_v4_t *eparams_v4 = NULL;
u8 *scan_params = NULL;
u8 *params = NULL;
s32 search_state = WL_P2P_DISC_ST_SCAN;
@@ -1950,7 +1989,10 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
goto exit;
}
- if (IS_SCAN_PARAMS_V3_V2(cfg)) {
+ if (IS_SCAN_PARAMS_V4(cfg)) {
+ params_size = (WL_SCAN_PARAMS_V4_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_v4_t, params));
+ } else if (IS_SCAN_PARAMS_V3_V2(cfg)) {
params_size = (WL_SCAN_PARAMS_V3_FIXED_SIZE +
OFFSETOF(wl_escan_params_v3_t, params));
} else {
@@ -2041,7 +2083,13 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
}
wl_escan_set_sync_id(sync_id, cfg);
- if (IS_SCAN_PARAMS_V3_V2(cfg)) {
+ if (IS_SCAN_PARAMS_V4(cfg)) {
+ eparams_v4 = (wl_escan_params_v4_t *)params;
+ scan_params = (u8 *)&eparams_v4->params;
+ eparams_v4->version = htod32(cfg->scan_params_ver);
+ eparams_v4->action = htod16(action);
+ eparams_v4->sync_id = sync_id;
+ } else if (IS_SCAN_PARAMS_V3_V2(cfg)) {
eparams_v3 = (wl_escan_params_v3_t *)params;
scan_params = (u8 *)&eparams_v3->params;
eparams_v3->version = htod32(cfg->scan_params_ver);
@@ -2064,7 +2112,9 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
	/* Override active_time to reduce scan time if it's the first broadcast scan. */
if (is_first_init_2g_scan) {
- if (eparams_v3) {
+ if (eparams_v4) {
+ eparams_v4->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+ } else if (eparams_v3) {
eparams_v3->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
} else {
eparams->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
@@ -2867,7 +2917,9 @@ static void wl_cfgscan_scan_abort(struct bcm_cfg80211 *cfg)
u32 channel, channel_num;
/* Abort scan params only need space for 1 channel and 0 ssids */
- if (IS_SCAN_PARAMS_V3_V2(cfg)) {
+ if (IS_SCAN_PARAMS_V4(cfg)) {
+ params_size = WL_SCAN_PARAMS_V4_FIXED_SIZE + (1 * sizeof(uint16));
+ } else if (IS_SCAN_PARAMS_V3_V2(cfg)) {
params_size = WL_SCAN_PARAMS_V3_FIXED_SIZE + (1 * sizeof(uint16));
} else {
params_size = WL_SCAN_PARAMS_V1_FIXED_SIZE + (1 * sizeof(uint16));
@@ -2883,7 +2935,12 @@ static void wl_cfgscan_scan_abort(struct bcm_cfg80211 *cfg)
channel = htodchanspec(-1);
channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
(1 & WL_SCAN_PARAMS_COUNT_MASK));
- if (IS_SCAN_PARAMS_V3_V2(cfg)) {
+ if (IS_SCAN_PARAMS_V4(cfg)) {
+ wl_scan_params_v4_t *params_v4 = (wl_scan_params_v4_t *)params;
+ params_v4->channel_list[0] = channel;
+ params_v4->channel_num = channel_num;
+ params_v4->length = htod16(sizeof(wl_scan_params_v4_t));
+ } else if (IS_SCAN_PARAMS_V3_V2(cfg)) {
wl_scan_params_v3_t *params_v3 = (wl_scan_params_v3_t *)params;
params_v3->channel_list[0] = channel;
params_v3->channel_num = channel_num;
@@ -5181,11 +5238,30 @@ wl_init_scan_params(struct bcm_cfg80211 *cfg, u8 *params, u16 params_size,
u32 sync_id = 0;
wl_escan_params_v1_t *eparams = NULL;
wl_escan_params_v3_t *eparams_v3 = NULL;
+ wl_escan_params_v4_t *eparams_v4 = NULL;
wl_scan_params_v1_t *scanparams = NULL;
wl_scan_params_v3_t *scanparams_v3 = NULL;
+ wl_scan_params_v4_t *scanparams_v4 = NULL;
wl_escan_set_sync_id(sync_id, cfg);
- if (IS_SCAN_PARAMS_V3_V2(cfg)) {
+ if (IS_SCAN_PARAMS_V4(cfg)) {
+ eparams_v4 = (wl_escan_params_v4_t *)params;
+ eparams_v4->version = htod32(cfg->scan_params_ver);
+ eparams_v4->action = htod16(action);
+ eparams_v4->sync_id = sync_id;
+ scanparams_v4 = (wl_scan_params_v4_t *)&eparams_v4->params;
+ (void)memcpy_s(&scanparams_v4->bssid, ETHER_ADDR_LEN, &ether_bcast, ETHER_ADDR_LEN);
+ scanparams_v4->version = htod16(WL_SCAN_PARAMS_VERSION_V4);
+ scanparams_v4->length = htod16(sizeof(wl_scan_params_v4_t));
+ scanparams_v4->bss_type = DOT11_BSSTYPE_ANY;
+ scanparams_v4->scan_type = htod32(scan_type);
+ scanparams_v4->scan_type_ext = 0;
+ scanparams_v4->nprobes = htod32(-1);
+ scanparams_v4->active_time = htod32(-1);
+ scanparams_v4->passive_time = htod32(passive_time);
+ scanparams_v4->home_time = htod32(-1);
+ bzero(&scanparams_v4->ssid, sizeof(wlc_ssid_t));
+ } else if (IS_SCAN_PARAMS_V3_V2(cfg)) {
eparams_v3 = (wl_escan_params_v3_t *)params;
eparams_v3->version = htod32(cfg->scan_params_ver);
eparams_v3->action = htod16(action);
@@ -5262,8 +5338,10 @@ wl_cfgscan_listen_on_channel(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev
u8 *params = NULL;
wl_escan_params_v1_t *eparams = NULL;
wl_escan_params_v3_t *eparams_v3 = NULL;
+ wl_escan_params_v4_t *eparams_v4 = NULL;
wl_scan_params_v1_t *scanparams = NULL;
wl_scan_params_v3_t *scanparams_v3 = NULL;
+ wl_scan_params_v4_t *scanparams_v4 = NULL;
u16 *chanspec_list = NULL;
u32 channel_num = 0, scan_type = 0;
@@ -5294,7 +5372,10 @@ wl_cfgscan_listen_on_channel(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev
/* Use primary ndev for netless dev. BSSIDX will point to right I/F */
ndev = wdev->netdev ? wdev->netdev : bcmcfg_to_prmry_ndev(cfg);
- if (IS_SCAN_PARAMS_V3_V2(cfg)) {
+ if (IS_SCAN_PARAMS_V4(cfg)) {
+ params_size = (WL_SCAN_PARAMS_V4_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_v4_t, params));
+ } else if (IS_SCAN_PARAMS_V3_V2(cfg)) {
params_size = (WL_SCAN_PARAMS_V3_FIXED_SIZE +
OFFSETOF(wl_escan_params_v3_t, params));
} else {
@@ -5322,7 +5403,12 @@ wl_cfgscan_listen_on_channel(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev
scan_type, WL_SCAN_ACTION_START, dwell);
channel_num = (chanspec_num & WL_SCAN_PARAMS_COUNT_MASK);
- if (IS_SCAN_PARAMS_V3_V2(cfg)) {
+ if (IS_SCAN_PARAMS_V4(cfg)) {
+ eparams_v4 = (wl_escan_params_v4_t *)params;
+ scanparams_v4 = (wl_scan_params_v4_t *)&eparams_v4->params;
+ chanspec_list = scanparams_v4->channel_list;
+ scanparams_v4->channel_num = channel_num;
+ } else if (IS_SCAN_PARAMS_V3_V2(cfg)) {
eparams_v3 = (wl_escan_params_v3_t *)params;
scanparams_v3 = (wl_scan_params_v3_t *)&eparams_v3->params;
chanspec_list = scanparams_v3->channel_list;
@@ -6969,6 +7055,39 @@ wl_get_ap_chanspecs(struct bcm_cfg80211 *cfg, wl_ap_oper_data_t *ap_data)
}
}
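+/* Return TRUE when the per-channel info flags (radar/passive, restricted, indoor-only,
+ * or P2P-prohibited for a GO) rule the chanspec out for AP/GO operation.
+ */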
+bool
+wl_cfgscan_chaninfo_restricted(struct bcm_cfg80211 *cfg,
+ struct net_device *dev, u32 chan_info, chanspec_t chspec)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ /* common restrictions for GO/AP */
+ if (chan_info & (WL_CHAN_RADAR | WL_CHAN_PASSIVE)) {
+ WL_INFORM_MEM(("radar/passive restricted chspec:0x%x chaninfo:0x%x\n",
+ chspec, chan_info));
+ return TRUE;
+ }
+
+ if (chan_info & (WL_CHAN_RESTRICTED | WL_CHAN_CLM_RESTRICTED)) {
+ WL_INFORM_MEM(("restricted ch_spec:0x%x chan_info:0x%x\n", chspec, chan_info));
+ return TRUE;
+ }
+
+ if (chan_info & WL_CHAN_INDOOR_ONLY) {
+ WL_INFORM_MEM(("Indoor restricted chan_info:0x%x\n", chan_info));
+ return TRUE;
+ }
+
+ if (IS_P2P_GO(wdev)) {
+ if (chan_info & WL_CHAN_P2P_PROHIBITED) {
+ WL_INFORM_MEM(("P2P prohibited for chspec:0x%x\n", chspec));
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
bool wl_is_chanspec_restricted(struct bcm_cfg80211 *cfg, chanspec_t sta_chanspec)
{
struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
@@ -7171,6 +7290,10 @@ wl_is_6g_restricted(struct bcm_cfg80211 *cfg, chanspec_t chspec)
u8 buf[WLC_IOCTL_SMLEN];
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ if (!wf_chspec_valid(chspec)) {
+ return TRUE;
+ }
+
ch = (u32)chspec;
err = wldev_iovar_getbuf_bsscfg(ndev, "per_chan_info", (void *)&ch,
sizeof(u32), buf, WLC_IOCTL_SMLEN, 0, NULL);
@@ -7192,6 +7315,10 @@ wl_is_5g_restricted(struct bcm_cfg80211 *cfg, chanspec_t chspec)
{
bool is_5g_restricted = FALSE;
+ if (!wf_chspec_valid(chspec)) {
+ return TRUE;
+ }
+
is_5g_restricted = (wl_is_chanspec_restricted(cfg, chspec) ||
#ifdef WL_UNII4_CHAN
(CHSPEC_IS5G(chspec) &&
@@ -7207,6 +7334,10 @@ wl_is_2g_restricted(struct bcm_cfg80211 *cfg, chanspec_t chspec)
{
bool is_2g_restricted = FALSE;
+ if (!wf_chspec_valid(chspec)) {
+ return TRUE;
+ }
+
is_2g_restricted = wl_is_chanspec_restricted(cfg, chspec);
return is_2g_restricted;
@@ -7289,7 +7420,8 @@ wl_handle_ap_sta_mlo_concurrency(struct bcm_cfg80211 *cfg, struct net_info *mld_
} else if (CHSPEC_IS2G(sta_chanspecs[WLC_BAND_2G]) &&
(parameter->freq_bands & WLC_BAND_2G)) {
if (!wl_is_2g_restricted(cfg, sta_chanspecs[WLC_BAND_2G]) &&
- (wl_is_link_sleepable(cfg, pri_chspec, sta_chanspecs[WLC_BAND_2G]))) {
+ (!wf_chspec_valid(sta_chanspecs[WLC_BAND_2G]) ||
+ wl_is_link_sleepable(cfg, pri_chspec, sta_chanspecs[WLC_BAND_2G]))) {
/* attempt SCC, else fail the ACS */
scc_case = wl_acs_check_scc(cfg, parameter, sta_chanspecs[WLC_BAND_2G],
qty, pList);
@@ -7305,7 +7437,21 @@ wl_handle_ap_sta_mlo_concurrency(struct bcm_cfg80211 *cfg, struct net_info *mld_
/* In case of no SCC success and incoming AP band has 2G */
if (!scc_case && (parameter->freq_bands & WLC_BAND_2G)) {
/* If 2G band is secondary link attempt SCC, else fail the ACS */
- if (!(sta_bands & WLC_BAND_2G)) {
+ if (sta_bands & WLC_BAND_2G) {
+ if (!wl_is_2g_restricted(cfg, sta_chanspecs[WLC_BAND_2G])) {
+ /* attempt SCC, else fail the ACS */
+ scc_case = wl_acs_check_scc(cfg, parameter,
+ sta_chanspecs[WLC_BAND_2G], qty, pList);
+ if (scc_case) {
+ WL_DBG(("2G SCC case 0x%x\n", sta_chanspecs[WLC_BAND_2G]));
+ } else {
+ WL_ERR(("No concurrent channel in 2G. Fail ACS\n"));
+ return BCME_BADARG;
+ }
+ } else {
+ WL_ERR(("Restricted 2G STA channel case \n"));
+ }
+ } else {
/* Attempt ACS with 2G band, since sta is not connected to 2G channel */
parameter->freq_bands &= ~(WLC_BAND_5G | WLC_BAND_6G);
WL_DBG(("Attempting ACS with 2G band\n"));
diff --git a/wl_cfgscan.h b/wl_cfgscan.h
index cbe6d0f..811f58b 100644
--- a/wl_cfgscan.h
+++ b/wl_cfgscan.h
@@ -56,11 +56,14 @@
#define SCAN_PARAMS_VER_2 2u
#define SCAN_PARAMS_VER_3 3u
+#define SCAN_PARAMS_VER_4 4u
/* SCAN_PARAMS V3 and V2 have the same size, so use V3 with the appropriate version param */
#define IS_SCAN_PARAMS_V3_V2(cfg) (((cfg->scan_params_ver == SCAN_PARAMS_VER_3) || \
(cfg->scan_params_ver == SCAN_PARAMS_VER_2)) ? TRUE : FALSE)
#define IS_SCAN_PARAMS_V3(cfg) ((cfg->scan_params_ver == SCAN_PARAMS_VER_3) ? \
TRUE : FALSE)
+#define IS_SCAN_PARAMS_V4(cfg) ((cfg->scan_params_ver == SCAN_PARAMS_VER_4) ? \
+ TRUE : FALSE)
extern s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
@@ -239,4 +242,6 @@ extern bool wl_is_2g_restricted(struct bcm_cfg80211 *cfg, chanspec_t chspec);
#define WL_MLO_PRMRY_NON_SLEEPABLE
extern bool wl_is_link_sleepable(struct bcm_cfg80211 *cfg, chanspec_t pri_chspec,
chanspec_t target_chspec);
+extern bool wl_cfgscan_chaninfo_restricted(struct bcm_cfg80211 *cfg,
+ struct net_device *dev, u32 chan_info, chanspec_t chspec);
#endif /* _wl_cfgscan_h_ */
diff --git a/wl_cfgvendor.c b/wl_cfgvendor.c
index d1b168a..a381da5 100644
--- a/wl_cfgvendor.c
+++ b/wl_cfgvendor.c
@@ -15628,4 +15628,49 @@ wl_cfgvendor_custom_advlog_disconn(struct bcm_cfg80211 *cfg, wl_assoc_status_t *
return;
}
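+/* Log a locally generated disassoc through the advanced-connectivity logging path (STA role only) */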
+void
+wl_cfgvendor_advlog_disassoc_tx(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ uint32 reason, int rssi)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ wl_assoc_status_t as;
+ s32 ifidx = DHD_BAD_IF;
+ u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ struct ether_addr fw_bssid;
+ int err;
+
+ /* In DEAUTH_IND or Beacon loss cases, we already lost contact */
+ bzero(&fw_bssid, sizeof(fw_bssid));
+ err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &fw_bssid, ETHER_ADDR_LEN);
+ if (err) {
+ WL_ERR(("not inform disassoc for already disconnected\n"));
+ return;
+ }
+
+ if (!curbssid) {
+ WL_ERR(("No bssid found\n"));
+ return;
+ }
+
+ ifidx = dhd_net2idx(dhdp->info, ndev);
+ /* Advanced Logging supports only STA mode */
+ if (!DHD_IF_ROLE_STA(dhdp, ifidx)) {
+ return;
+ }
+
+ bzero(&as, sizeof(wl_assoc_status_t));
+ as.ndev = ndev;
+ if (memcpy_s(as.addr, ETH_ALEN, curbssid, ETH_ALEN)) {
+ WL_ERR(("failed to memcpy bssid\n"));
+ return;
+ }
+
+	/* Normally, FW sends the WLC_E_DISASSOC event twice.
+	 * To avoid printing twice, move this into the WLC_DISASSOC sending path
+	 * and set WLC_E_DISASSOC forcibly instead of WLC_DISASSOC.
+ */
+ as.event_type = WLC_E_DISASSOC;
+ as.reason = reason;
+ wl_cfgvendor_advlog_connect_event(&as, FALSE, rssi);
+}
#endif /* WL_CFGVENDOR_CUST_ADVLOG */
diff --git a/wl_cfgvif.c b/wl_cfgvif.c
index 35223bd..dfb33a0 100644
--- a/wl_cfgvif.c
+++ b/wl_cfgvif.c
@@ -1796,22 +1796,246 @@ wl_cfg80211_set_chan_mlo_concurrency(struct bcm_cfg80211 *cfg, struct net_info *
/* STA dominant link 2G case */
} else if (CHSPEC_IS2G(ap_chspec)) {
if (CHSPEC_IS2G(sta_chanspecs[WLC_BAND_2G])) {
- if (!wl_is_2g_restricted(cfg, sta_chanspecs[WLC_BAND_2G]) &&
- IS_CHSPEC_SCC(sta_chanspecs[WLC_BAND_2G], ap_chspec)) {
+ if (!wl_is_2g_restricted(cfg, sta_chanspecs[WLC_BAND_2G])) {
target_chspec = wf_chspec_primary20_chspec(
sta_chanspecs[WLC_BAND_2G]);
WL_DBG(("2G SCC case 0x%x\n", target_chspec));
+ } else {
+ WL_ERR(("Restricted 2G chanspec %x\n", sta_chanspecs[WLC_BAND_2G]));
}
} else {
target_chspec = wf_chspec_primary20_chspec(ap_chspec);
}
}
- WL_INFORM_MEM(("Target chanspec set to %x\n", target_chspec));
+ if (wf_chspec_valid(target_chspec)) {
+ WL_INFORM_MEM(("Target chanspec set to %x\n", target_chspec));
+ } else {
+ WL_ERR(("No valid chanspec available to start the softAP\n"));
+ }
+
return target_chspec;
}
#endif /* WL_MLO */
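+/* Step the chanspec down one bandwidth class (320->160->80->40->20) keeping the same
+ * primary 20 MHz channel; sets INVCHANSPEC when no narrower bandwidth is available.
+ */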
+static s32
+wl_get_lower_bw_chspec(chanspec_t *chspec)
+{
+ chanspec_t cur_chspec = *chspec;
+ u32 bw = CHSPEC_BW(cur_chspec);
+
+ if (bw == WL_CHANSPEC_BW_320) {
+ bw = WL_CHANSPEC_BW_160;
+ } else if (bw == WL_CHANSPEC_BW_160) {
+ bw = WL_CHANSPEC_BW_80;
+ } else if (bw == WL_CHANSPEC_BW_80) {
+ bw = WL_CHANSPEC_BW_40;
+ } else if (bw == WL_CHANSPEC_BW_40) {
+ bw = WL_CHANSPEC_BW_20;
+ } else {
+ *chspec = INVCHANSPEC;
+ return BCME_ERROR;
+ }
+#ifdef WL_BW320MHZ
+ *chspec = wf_create_chspec_from_primary(wf_chspec_primary20_chan(cur_chspec),
+ bw, CHSPEC_BAND(cur_chspec), 0);
+#else
+ *chspec = wf_create_chspec_from_primary(wf_chspec_primary20_chan(cur_chspec),
+ bw, CHSPEC_BAND(cur_chspec));
+#endif /* WL_BW320MHZ */
+ if (!wf_chspec_valid(*chspec)) {
+ WL_ERR(("invalid chanspec\n"));
+ return BCME_ERROR;
+ }
+
+ WL_INFORM_MEM(("cur_chspec:%x new_chspec:0x%x BW:%d chan:%d\n",
+ cur_chspec, *chspec, bw,
+ wf_chspec_primary20_chan(*chspec)));
+ return BCME_OK;
+}
+
+#define MAX_20MHZ_CHANNELS 16u
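+/* Shrink the 'overlap' array to the 20 MHz entries that fall within sel_chspec's sub-bands */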
+static s32
+wl_get_overlapping_chspecs(chanspec_t sel_chspec,
+ wl_chanspec_attr_v1_t *overlap, u32 *arr_idx)
+{
+ int i, j;
+ u8 max_idx = *arr_idx;
+ u8 chan_idx = 0;
+ u32 band;
+ chanspec_t chspec;
+ u32 chaninfo;
+ wl_chanspec_attr_v1_t new_arr[MAX_20MHZ_CHANNELS];
+ u8 chan_array[MAX_20MHZ_CHANNELS] = {0};
+ s32 ret;
+
+ if (max_idx >= MAX_20MHZ_CHANNELS) {
+ WL_ERR(("invalid arg\n"));
+ return BCME_ERROR;
+ }
+
+ bzero(new_arr, sizeof(new_arr));
+ band = CHSPEC_BAND(sel_chspec);
+ wf_get_all_ext(sel_chspec, chan_array);
+ for (i = 0; i < max_idx; i++) {
+ chspec = overlap[i].chanspec;
+ chaninfo = overlap[i].chaninfo;
+ if (band != CHSPEC_BAND(chspec)) {
+ continue;
+ }
+ for (j = 0; j < MAX_20MHZ_CHANNELS; j++) {
+ if (!chan_array[j]) {
+ /* if list is empty, break */
+ break;
+ }
+ if ((chan_array[j] == CHSPEC_CHANNEL(chspec))) {
+ new_arr[chan_idx].chanspec = chspec;
+ new_arr[chan_idx].chaninfo = chaninfo;
+ WL_DBG(("sel_chspec:%x overlap_chspec:%x\n",
+ sel_chspec, new_arr[chan_idx].chanspec));
+ chan_idx++;
+ /* if a match is found, go to next chanspec */
+ break;
+ }
+ }
+ }
+ *arr_idx = chan_idx;
+ ret = memcpy_s(overlap, (sizeof(wl_chanspec_attr_v1_t) * (*arr_idx)),
+ new_arr, (sizeof(wl_chanspec_attr_v1_t) * chan_idx));
+ if (ret) {
+		WL_ERR(("memcpy failed for chan array copy. arr_idx:%d"
+ " new_arr_idx:%d\n", max_idx, chan_idx));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
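+/* Query chan_info_list, collect the 20 MHz sub-channels overlapping the requested
+ * chanspec and downgrade the bandwidth while any of those sub-channels is restricted.
+ */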
+static s32
+wl_filter_restricted_subbands(struct bcm_cfg80211 *cfg,
+ struct net_device *dev, chanspec_t *cur_chspec)
+{
+ wl_chanspec_list_v1_t *chan_list = NULL;
+ u16 list_count;
+ u32 i, j, k;
+ u32 arr_idx = 0;
+ u32 chaninfo = 0;
+ chanspec_t chspec;
+ bool retry_bw = FALSE;
+ chanspec_t sel_chspec = *cur_chspec;
+ u32 bw;
+ wl_chanspec_attr_v1_t overlap[MAX_20MHZ_CHANNELS];
+ u8 chan_array[MAX_20MHZ_CHANNELS] = {0};
+ s32 err = BCME_OK;
+ u32 tot_size = 0;
+ u32 band;
+
+ chan_list = (wl_chanspec_list_v1_t *)MALLOCZ(cfg->osh, CHANINFO_LIST_BUF_SIZE);
+ if (chan_list == NULL) {
+ WL_ERR(("failed to allocate local buf\n"));
+ return BCME_NOMEM;
+ }
+
+	/* get latest updated chan info list */
+ err = wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "chan_info_list", NULL,
+ 0, chan_list, CHANINFO_LIST_BUF_SIZE, 0, NULL);
+ if (err) {
+ MFREE(cfg->osh, chan_list, CHANINFO_LIST_BUF_SIZE);
+ WL_ERR(("get chan_info_list err(%d)\n", err));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+ if (chan_list->version != CHAN_INFO_LIST_ALL_V1) {
+ WL_ERR(("version mismatch! incoming:%d supported_ver:%d\n",
+ chan_list->version, CHAN_INFO_LIST_ALL_V1));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+ list_count = chan_list->count;
+ if (!list_count) {
+ WL_ERR(("empty list\n"));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+ tot_size = (sizeof(wl_chanspec_attr_v1_t) * list_count) + (sizeof(u16) * 2);
+ if (tot_size >= CHAN_LIST_BUF_LEN) {
+ WL_ERR(("exceeds buffer size:%d\n", list_count));
+ /* enforce failure */
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+ band = CHSPEC_BAND(sel_chspec);
+ wf_get_all_ext(sel_chspec, chan_array);
+ bzero(overlap, sizeof(overlap));
+ for (i = 0; i < dtoh32(list_count); i++) {
+ chspec = dtoh32(chan_list->chspecs[i].chanspec);
+ chaninfo = dtoh32(chan_list->chspecs[i].chaninfo);
+
+ /* get overlapping chanspec, chaninfo details based on current chanspec */
+ if ((CHSPEC_BAND(chspec) == band) && (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_20)) {
+ for (j = 0; j < MAX_20MHZ_CHANNELS; j++) {
+ if (!chan_array[j]) {
+ /* if entry is empty, break */
+ break;
+ }
+ if (chan_array[j] == CHSPEC_CHANNEL(chspec)) {
+ overlap[arr_idx].chanspec = chspec;
+ overlap[arr_idx].chaninfo = chaninfo;
+ WL_DBG(("sel_chspec:%x overlap_chspec:%x\n",
+ sel_chspec, overlap[arr_idx].chanspec));
+ arr_idx++;
+ break;
+ }
+ }
+ }
+ }
+
+ do {
+ bw = CHSPEC_BW(sel_chspec);
+ WL_INFORM_MEM(("chanspec_req:0x%x BW:%d overlap_channels:%d\n",
+ sel_chspec, bw, arr_idx));
+ for (k = 0; k < arr_idx; k++) {
+ retry_bw = FALSE;
+ if (wl_cfgscan_chaninfo_restricted(cfg, dev, overlap[k].chaninfo,
+ overlap[k].chanspec)) {
+ if ((bw == WL_CHANSPEC_BW_80) || (bw == WL_CHANSPEC_BW_40) ||
+ (bw == WL_CHANSPEC_BW_160) || (bw == WL_CHANSPEC_BW_320)) {
+ if (wl_get_lower_bw_chspec(&sel_chspec) != BCME_OK) {
+ WL_INFORM_MEM(("wl_get_lower_bw_chspec failed.\n"));
+ err = BCME_ERROR;
+ goto exit;
+ }
+ if (wl_get_overlapping_chspecs(sel_chspec,
+ overlap, &arr_idx) != BCME_OK) {
+ WL_INFORM_MEM(("get overlap arr failed\n"));
+ err = BCME_ERROR;
+ goto exit;
+ }
+ /* try with new BW */
+ retry_bw = TRUE;
+ break;
+ } else {
+ WL_ERR(("chspec:0x%x No lower BW available\n", sel_chspec));
+ sel_chspec = INVCHANSPEC;
+ }
+ }
+ }
+ } while ((sel_chspec != INVCHANSPEC) && (retry_bw));
+
+exit:
+ WL_INFORM_MEM(("selected chanspec:0x%x\n", sel_chspec));
+ *cur_chspec = sel_chspec;
+
+ /* free chan_list memory after use */
+ MFREE(cfg->osh, chan_list, CHANINFO_LIST_BUF_SIZE);
+
+ return err;
+}
+
s32
wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
@@ -1889,7 +2113,7 @@ wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
if (mlo_num_links > 1) {
chspec = wl_cfg80211_set_chan_mlo_concurrency(cfg, mld_netinfo, chspec);
if (chspec == INVCHANSPEC) {
- WL_ERR(("Invalid target chanspec, MLO case %x\n", chspec));
+ WL_ERR(("Invalid target chanspec, MLO case\n"));
return -EINVAL;
}
} else {
@@ -2083,15 +2307,22 @@ set_channel:
if (wf_chspec_valid(cur_chspec)) {
/* convert 802.11 ac chanspec to current fw chanspec type */
cur_chspec = wl_chspec_host_to_driver(cur_chspec);
- WL_INFORM_MEM(("set chanspec 0x%x\n", cur_chspec));
if (cur_chspec != INVCHANSPEC) {
- if ((err = wldev_iovar_setint(dev, "chanspec",
- cur_chspec)) == BCME_BADCHAN) {
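+			/* Downgrade to a narrower bandwidth first if any 20 MHz sub-band is restricted */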
+ err = wl_filter_restricted_subbands(cfg, dev, &cur_chspec);
+ if (err) {
+ return err;
+ }
+ WL_INFORM_MEM(("set chanspec 0x%x\n", cur_chspec));
+ err = wldev_iovar_setint(dev, "chanspec", cur_chspec);
+ if (err) {
+ WL_ERR(("set chanspec failed for %x\n", cur_chspec));
+ }
+ if (err == BCME_BADCHAN) {
u32 local_channel = wf_chspec_center_channel(cur_chspec);
+ bw = CHSPEC_BW(cur_chspec);
/* For failure cases, attempt BW downgrade */
- WL_ERR(("set chanspec failed for %x\n", cur_chspec));
- if ((bw == WL_CHANSPEC_BW_80) || (bw == WL_CHANSPEC_BW_160) ||
- (bw == WL_CHANSPEC_BW_320))
+ if ((bw == WL_CHANSPEC_BW_80) || (bw == WL_CHANSPEC_BW_40) ||
+ (bw == WL_CHANSPEC_BW_160) || (bw == WL_CHANSPEC_BW_320))
goto change_bw;
err = wldev_ioctl_set(dev, WLC_SET_CHANNEL,
&local_channel, sizeof(local_channel));
@@ -3246,7 +3477,7 @@ static s32 wl_cfg80211_bcn_set_params(
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
s32 err = BCME_OK;
- WL_DBG(("interval (%d) \ndtim_period (%d) \n",
+ WL_DBG(("interval (%d) dtim_period (%d) \n",
info->beacon_interval, info->dtim_period));
if (info->beacon_interval) {
@@ -5307,7 +5538,7 @@ exit:
#endif /* WL_CFG80211_STA_EVENT || KERNEL_VER < 3.2 */
#ifdef WL_MLO
-static void
+static s32
wl_update_mlo_peer_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, const u8 *addr)
{
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
@@ -5323,19 +5554,20 @@ wl_update_mlo_peer_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, const
wl_mlo_link_status_v2_t *mst_link;
wl_mlo_link_peer_info_v2_t *peer_info;
s32 ifidx;
- s32 ret;
+ s32 ret = 0;
u32 mlo_status_len = sizeof(wl_mlo_status_v2_t);
u32 i, j;
+ bool match_found = false;
/* Apply MLO config from connect context if chip supports it. */
if (!cfg->mlo.supported) {
- return;
+ return BCME_UNSUPPORTED;
}
ifidx = dhd_net2idx(dhdp->info, ndev);
if (ifidx < 0) {
WL_ERR(("invalid ifidx\n"));
- return;
+ return BCME_BADARG;
}
bzero(&mlo_peer_info, sizeof(dhd_mlo_peer_info_t));
@@ -5401,17 +5633,24 @@ wl_update_mlo_peer_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, const
&mlo_peer_info.link_info[mlo_peer_info.num_links].link_addr),
mlo_peer_info.link_info[mlo_peer_info.num_links].chspec));
mlo_peer_info.num_links++;
+
+ match_found = true;
}
}
}
- /* Update mlo peer info in sta info */
- dhd_update_mlo_peer_info(dhdp, ifidx, addr, &mlo_peer_info);
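+	/* Update sta_info only when a matching peer link entry was found; otherwise return
+	 * BCME_NOTFOUND so the caller falls back to the legacy chanspec update.
+	 */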
+ if (match_found) {
+ /* Update mlo peer info in sta info */
+ dhd_update_mlo_peer_info(dhdp, ifidx, addr, &mlo_peer_info);
+ } else {
+ ret = BCME_NOTFOUND;
+ }
exit:
if (iovar_buf) {
MFREE(cfg->osh, iovar_buf, iovar_buf_len);
}
+ return ret;
}
s32
@@ -5512,22 +5751,15 @@ wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
#if defined(WL_CFG80211_STA_EVENT) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
struct station_info sinfo;
#endif /* (LINUX_VERSION >= VERSION(3,2,0)) || !WL_CFG80211_STA_EVENT */
+#ifdef BIGDATA_SOFTAP
dhd_pub_t *dhdp;
+#endif /* BIGDATA_SOFTAP */
bool cancel_timeout = FALSE;
+ s32 ret = 0;
WL_INFORM_MEM(("[%s] Mode AP/GO. Event:%d status:%d reason:%d\n",
ndev->name, event, ntoh32(e->status), reason));
- dhdp = (dhd_pub_t *)(cfg->pub);
- /* Check the current op_mode */
- if (dhdp &&
- ((ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) &&
- !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE))) {
- WL_ERR(("unsupported op mode: %d, non-cfg ap, iftype %d\n",
- dhdp->op_mode, ndev->ieee80211_ptr->iftype));
- return BCME_OK;
- }
-
#ifdef WL_CLIENT_SAE
if (event == WLC_E_AUTH && ntoh32(e->auth_type) == DOT11_SAE) {
err = wl_handle_auth_event(cfg, ndev, e, data);
@@ -5624,6 +5856,7 @@ wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) {
WL_ERR(("AP link down - skip get sta data\n"));
} else {
+ dhdp = (dhd_pub_t *)(cfg->pub);
if (dhdp && dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) {
dhd_schedule_gather_ap_stadata(cfg, ndev, e);
}
@@ -5654,9 +5887,11 @@ wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
* For non MLO clients wl_update_sta_chanspec_info will be
* called.
*/
- wl_update_mlo_peer_info(cfg, ndev, e->addr.octet);
+ ret = wl_update_mlo_peer_info(cfg, ndev, e->addr.octet);
#endif /* WL_MLO */
- wl_update_sta_chanspec_info(cfg, ndev, e->addr.octet);
+ if (ret == BCME_NOTFOUND) {
+ wl_update_sta_chanspec_info(cfg, ndev, e->addr.octet);
+ }
sinfo.assoc_req_ies = data;
sinfo.assoc_req_ies_len = len;
@@ -8732,11 +8967,13 @@ wl_cfgvif_bssid_match_found(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
break;
}
}
- } else {
+ }
+
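+	/* Fall back to the legacy single-BSSID comparison when no per-link match was found */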
+ if (found == FALSE) {
/* legacy bssid */
if (!memcmp(curbssid, mac_addr, ETHER_ADDR_LEN)) {
- found = TRUE;
- WL_DBG(("matching bssid found\n"));
+ found = TRUE;
+ WL_DBG(("matching bssid found\n"));
}
}
diff --git a/wldev_common.c b/wldev_common.c
index e092397..572c252 100644
--- a/wldev_common.c
+++ b/wldev_common.c
@@ -437,28 +437,46 @@ static s32
wldev_per_link_ioctl_set(
struct net_device *dev, u8 link_idx, u32 cmd, const void *arg, u32 len)
{
- s8 iovar_buf[WLC_IOCTL_SMLEN];
+ s8 *iovar_buf = NULL;
s32 ret = 0;
s32 iovar_len;
+ s32 alloc_len = 0;
+
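+	/* Heap buffer sized for the per-link iovar header plus the caller's payload */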
+ alloc_len = WLC_IOCTL_SMLEN + len;
+ iovar_buf = (s8 *)kzalloc(alloc_len, GFP_KERNEL);
+ if (unlikely(!iovar_buf)) {
+ WL_ERR(("iovar_buf alloc failed\n"));
+ return BCME_NOMEM;
+ }
- iovar_len = wldev_link_mkioctl(cmd, link_idx, arg, len, iovar_buf, sizeof(iovar_buf));
+ iovar_len = wldev_link_mkioctl(cmd, link_idx, arg, len, iovar_buf, alloc_len);
if (iovar_len > 0) {
ret = wldev_ioctl_set(dev, WLC_SET_VAR, iovar_buf, iovar_len);
} else {
ret = BCME_BUFTOOSHORT;
}
+ kfree(iovar_buf);
+
return ret;
}
static s32
wldev_per_link_ioctl_get(struct net_device *dev, u8 link_idx, u32 cmd, void *arg, u32 len)
{
- s8 iovar_buf[WLC_IOCTL_SMLEN];
+ s8 *iovar_buf = NULL;
s32 ret = 0;
s32 iovar_len;
+ s32 alloc_len = 0;
+
+ alloc_len = WLC_IOCTL_SMLEN + len;
+ iovar_buf = (s8 *)kzalloc(alloc_len, GFP_KERNEL);
+ if (unlikely(!iovar_buf)) {
+ WL_ERR(("iovar_buf alloc failed\n"));
+ return BCME_NOMEM;
+ }
- iovar_len = wldev_link_mkioctl(cmd, link_idx, arg, len, iovar_buf, sizeof(iovar_buf));
+ iovar_len = wldev_link_mkioctl(cmd, link_idx, arg, len, iovar_buf, alloc_len);
if (iovar_len > 0) {
ret = wldev_ioctl_get(dev, WLC_GET_VAR, iovar_buf, iovar_len);
if (ret == 0) {
@@ -468,6 +486,8 @@ wldev_per_link_ioctl_get(struct net_device *dev, u8 link_idx, u32 cmd, void *arg
ret = BCME_BUFTOOSHORT;
}
+ kfree(iovar_buf);
+
return ret;
}